diff -Nru cargo-0.44.1/azure-pipelines.yml cargo-0.47.0/azure-pipelines.yml --- cargo-0.44.1/azure-pipelines.yml 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/azure-pipelines.yml 2020-07-17 20:39:39.000000000 +0000 @@ -4,8 +4,6 @@ - '*' exclude: - master -pr: -- master jobs: - job: Linux @@ -43,6 +41,9 @@ x86_64-msvc: TOOLCHAIN: stable-x86_64-pc-windows-msvc OTHER_TARGET: i686-pc-windows-msvc + x86_64-gnu: + TOOLCHAIN: nightly-x86_64-pc-windows-gnu + OTHER_TARGET: i686-pc-windows-gnu - job: rustfmt pool: @@ -100,7 +101,7 @@ - bash: | set -e mkdir mdbook - curl -Lf https://github.com/rust-lang-nursery/mdBook/releases/download/v0.3.1/mdbook-v0.3.1-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook + curl -Lf https://github.com/rust-lang/mdBook/releases/download/v0.3.7/mdbook-v0.3.7-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook echo "##vso[task.prependpath]`pwd`/mdbook" displayName: "Install mdbook" - bash: cargo doc --no-deps diff -Nru cargo-0.44.1/Cargo.toml cargo-0.47.0/Cargo.toml --- cargo-0.44.1/Cargo.toml 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/Cargo.toml 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,6 @@ [package] name = "cargo" -version = "0.44.0" +version = "0.47.0" edition = "2018" authors = ["Yehuda Katz ", "Carl Lerche ", @@ -22,8 +22,7 @@ atty = "0.2" bytesize = "1.0" cargo-platform = { path = "crates/cargo-platform", version = "0.1.1" } -crates-io = { path = "crates/crates-io", version = "0.31" } -crossbeam-channel = "0.4" +crates-io = { path = "crates/crates-io", version = "0.31.1" } crossbeam-utils = "0.7" crypto-hash = "0.3.1" curl = { version = "0.4.23", features = ["http2"] } @@ -31,10 +30,9 @@ env_logger = "0.7.0" pretty_env_logger = { version = "0.4", optional = true } anyhow = "1.0" -filetime = "0.2" -flate2 = { version = "1.0.3", features = ["zlib"] } -fs2 = "0.4" -git2 = "0.13.0" +filetime = "0.2.9" +flate2 = { version = "1.0.3", default-features = false, features = ["zlib"] } +git2 = "0.13.5" git2-curl = "0.14.0" glob = "0.3.0" hex = "0.4" @@ -46,15 +44,14 @@ lazycell = "1.2.0" libc = "0.2" log = "0.4.6" -libgit2-sys = "0.12.0" +libgit2-sys = "0.12.7" memchr = "2.1.3" num_cpus = "1.0" opener = "0.4" percent-encoding = "2.0" -remove_dir_all = "0.5.2" rustfix = "0.5.0" same-file = "1" -semver = { version = "0.9.0", features = ["serde"] } +semver = { version = "0.10", features = ["serde"] } serde = { version = "1.0.82", features = ["derive"] } serde_ignored = "0.1.0" serde_json = { version = "1.0.30", features = ["raw_value"] } @@ -64,12 +61,13 @@ tempfile = "3.0" termcolor = "1.0" toml = "0.5.3" +unicode-xid = "0.2.0" url = "2.0" walkdir = "2.2" clap = "2.31.2" unicode-width = "0.1.5" openssl = { version = '0.10.11', optional = true } -im-rc = "14.0.0" +im-rc = "15.0.0" # A noop dependency that changes in the Rust repository, it's a bit of a hack. 
# See the `src/tools/rustc-workspace-hack/README.md` file in `rust-lang/rust` @@ -77,7 +75,7 @@ rustc-workspace-hack = "1.0.0" [target.'cfg(target_os = "macos")'.dependencies] -core-foundation = { version = "0.7.0", features = ["mac_os_10_7_support"] } +core-foundation = { version = "0.9.0", features = ["mac_os_10_7_support"] } [target.'cfg(windows)'.dependencies] miow = "0.3.1" diff -Nru cargo-0.44.1/CHANGELOG.md cargo-0.47.0/CHANGELOG.md --- cargo-0.44.1/CHANGELOG.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/CHANGELOG.md 2020-07-17 20:39:39.000000000 +0000 @@ -1,18 +1,361 @@ # Changelog +## Cargo 1.46 (2020-08-27) +[9fcb8c1d...HEAD](https://github.com/rust-lang/cargo/compare/9fcb8c1d...HEAD) + +### Added + +### Changed +- A warning is now displayed if a git dependency includes a `#` fragment in + the URL. This was potentially confusing because Cargo itself displays git + URLs with this syntax, but it does not have any meaning outside of the + `Cargo.lock` file, and would not work properly. + [#8297](https://github.com/rust-lang/cargo/pull/8297) + +### Fixed +- Fixed a rare situation where an update to `Cargo.lock` failed once, but then + subsequent runs allowed it proceed. + [#8274](https://github.com/rust-lang/cargo/pull/8274) +- Removed assertion that Windows dylibs must have a `.dll` extension. Some + custom JSON spec targets may change the extension. + [#8310](https://github.com/rust-lang/cargo/pull/8310) +- Updated libgit2, which brings in a fix for zlib errors for some remote + git servers like googlesource.com. + [#8320](https://github.com/rust-lang/cargo/pull/8320) + +### Nightly only +- Added `-Zrustdoc-map` feature which provides external mappings for rustdoc + (such as https://docs.rs/ links). + [docs](https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#rustdoc-map) + [#8287](https://github.com/rust-lang/cargo/pull/8287) +- Fixed feature calculation when a proc-macro is declared in `Cargo.toml` with + an underscore (like `proc_macro = true`). + [#8319](https://github.com/rust-lang/cargo/pull/8319) + + +## Cargo 1.45 (2020-07-16) +[ebda5065e...rust-1.45.0](https://github.com/rust-lang/cargo/compare/ebda5065...rust-1.45.0) + +### Added + +### Changed +- Changed official documentation to recommend `.cargo/config.toml` filenames + (with the `.toml` extension). `.toml` extension support was added in 1.39. + [#8121](https://github.com/rust-lang/cargo/pull/8121) +- The `registry.index` config value is no longer allowed (it has been + deprecated for 4 years). + [#7973](https://github.com/rust-lang/cargo/pull/7973) +- An error is generated if both `--index` and `--registry` are passed + (previously `--index` was silently ignored). + [#7973](https://github.com/rust-lang/cargo/pull/7973) +- The `registry.token` config value is no longer used with the `--index` flag. + This is intended to avoid potentially leaking the crates.io token to another + registry. + [#7973](https://github.com/rust-lang/cargo/pull/7973) +- Added a warning if `registry.token` is used with source replacement. It is + intended this will be an error in future versions. + [#7973](https://github.com/rust-lang/cargo/pull/7973) +- Windows GNU targets now copy `.dll.a` import library files for DLL crate + types to the output directory. + [#8141](https://github.com/rust-lang/cargo/pull/8141) +- Dylibs for all dependencies are now unconditionally copied to the output + directory. 
Some obscure scenarios can cause an old dylib to be referenced + between builds, and this ensures that all the latest copies are used. + [#8139](https://github.com/rust-lang/cargo/pull/8139) +- `package.exclude` can now match directory names. If a directory is + specified, the entire directory will be excluded, and Cargo will not attempt + to inspect it further. Previously Cargo would try to check every file in the + directory which could cause problems if the directory contained unreadable + files. + [#8095](https://github.com/rust-lang/cargo/pull/8095) +- When packaging with `cargo publish` or `cargo package`, Cargo can use git to + guide its decision on which files to include. Previously this git-based + logic required a `Cargo.toml` file to exist at the root of the repository. + This is no longer required, so Cargo will now use git-based guidance even if + there is not a `Cargo.toml` in the root of the repository. + [#8095](https://github.com/rust-lang/cargo/pull/8095) +- While unpacking a crate on Windows, if it fails to write a file because the + file is a reserved Windows filename (like "aux.rs"), Cargo will display an + extra message to explain why it failed. + [#8136](https://github.com/rust-lang/cargo/pull/8136) +- Failures to set mtime on files are now ignored. Some filesystems did not + support this. + [#8185](https://github.com/rust-lang/cargo/pull/8185) +- Certain classes of git errors will now recommend enabling + `net.git-fetch-with-cli`. + [#8166](https://github.com/rust-lang/cargo/pull/8166) +- When doing an LTO build, Cargo will now instruct rustc not to perform + codegen when possible. This may result in a faster build and use less disk + space. Additionally, for non-LTO builds, Cargo will instruct rustc to not + embed LLVM bitcode in libraries, which should decrease their size. + [#8192](https://github.com/rust-lang/cargo/pull/8192) + [#8226](https://github.com/rust-lang/cargo/pull/8226) + [#8254](https://github.com/rust-lang/cargo/pull/8254) +- The implementation for `cargo clean -p` has been rewritten so that it can + more accurately remove the files for a specific package. + [#8210](https://github.com/rust-lang/cargo/pull/8210) +- The way Cargo computes the outputs from a build has been rewritten to be + more complete and accurate. Newly tracked files will be displayed in JSON + messages, and may be uplifted to the output directory in some cases. Some of + the changes from this are: + + - `.exp` export files on Windows MSVC dynamic libraries are now tracked. + - Proc-macros on Windows track import/export files. + - All targets (like tests, etc.) that generate separate debug files + (pdb/dSYM) are tracked. + - Added .map files for wasm32-unknown-emscripten. + - macOS dSYM directories are tracked for all dynamic libraries + (dylib/cdylib/proc-macro) and for build scripts. + + There are a variety of other changes as a consequence of this: + + - Binary examples on Windows MSVC with a hyphen will now show up twice in + the examples directory (`foo_bar.exe` and `foo-bar.exe`). Previously Cargo + just renamed the file instead of hard-linking it. + - Example libraries now follow the same rules for hyphen/underscore + translation as normal libs (they will now use underscores). + + [#8210](https://github.com/rust-lang/cargo/pull/8210) +- Cargo attempts to scrub any secrets from the debug log for HTTP debugging. 
+ [#8222](https://github.com/rust-lang/cargo/pull/8222) +- Context has been added to many of Cargo's filesystem operations, so that + error messages now provide more information, such as the path that caused + the problem. + [#8232](https://github.com/rust-lang/cargo/pull/8232) +- Several commands now ignore the error if stdout or stderr is closed while it + is running. For example `cargo install --list | grep -q cargo-fuzz` would + previously sometimes panic because `grep -q` may close stdout before the + command finishes. Regular builds continue to fail if stdout or stderr is + closed, matching the behavior of many other build systems. + [#8236](https://github.com/rust-lang/cargo/pull/8236) +- If `cargo install` is given an exact version, like `--version=1.2.3`, it + will now avoid updating the index if that version is already installed, and + exit quickly indicating it is already installed. + [#8022](https://github.com/rust-lang/cargo/pull/8022) +- Changes to the `[patch]` section will now attempt to automatically update + `Cargo.lock` to the new version. It should now also provide better error + messages for the rare cases where it is unable to automatically update. + [#8248](https://github.com/rust-lang/cargo/pull/8248) + +### Fixed +- Fixed copying Windows `.pdb` files to the output directory when the filename + contained dashes. + [#8123](https://github.com/rust-lang/cargo/pull/8123) +- Fixed error where Cargo would fail when scanning if a package is inside a + git repository when any of its ancestor paths is a symlink. + [#8186](https://github.com/rust-lang/cargo/pull/8186) +- Fixed `cargo update` with an unused `[patch]` so that it does not get + stuck and refuse to update. + [#8243](https://github.com/rust-lang/cargo/pull/8243) +- Fixed a situation where Cargo would hang if stderr is closed, and the + compiler generated a large number of messages. + [#8247](https://github.com/rust-lang/cargo/pull/8247) +- Fixed backtraces on macOS not showing filenames or line numbers. As a + consequence of this, binary executables on apple targets do not include a + hash in the filename in Cargo's cache. This means Cargo can only track one + copy, so if you switch features or rustc versions, Cargo will need to + rebuild the executable. + [#8329](https://github.com/rust-lang/cargo/pull/8329) + [#8335](https://github.com/rust-lang/cargo/pull/8335) +- Fixed fingerprinting when using lld on Windows with a dylib. Cargo was + erroneously thinking the dylib was never fresh. + [#8290](https://github.com/rust-lang/cargo/pull/8290) + [#8335](https://github.com/rust-lang/cargo/pull/8335) + +### Nightly only +- Fixed passing the full path for `--target` to `rustdoc` when using JSON spec + targets. + [#8094](https://github.com/rust-lang/cargo/pull/8094) +- `-Cembed-bitcode=no` renamed to `-Cbitcode-in-rlib=no` + [#8134](https://github.com/rust-lang/cargo/pull/8134) +- Added new `resolver` field to `Cargo.toml` to opt-in to the new feature + resolver. + [#8129](https://github.com/rust-lang/cargo/pull/8129) +- `-Zbuild-std` no longer treats std dependencies as "local". This means that + it won't use incremental compilation for those dependencies, removes them + from dep-info files, and caps lints at "allow". + [#8177](https://github.com/rust-lang/cargo/pull/8177) +- Added `-Zmultitarget` which allows multiple `--target` flags to build the + same thing for multiple targets at once. 
+ [docs](https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#multitarget) + [#8167](https://github.com/rust-lang/cargo/pull/8167) +- Added `strip` option to the profile to remove symbols and debug information. + [docs](https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#profile-strip-option) + [#8246](https://github.com/rust-lang/cargo/pull/8246) +- Fixed panic with `cargo tree --target=all -Zfeatures=all`. + [#8269](https://github.com/rust-lang/cargo/pull/8269) + +## Cargo 1.44 (2020-06-04) +[bda50510...rust-1.44.0](https://github.com/rust-lang/cargo/compare/bda50510...rust-1.44.0) + +### Added +- 🔥 Added the `cargo tree` command. + [docs](https://doc.rust-lang.org/nightly/cargo/commands/cargo-tree.html) + [#8062](https://github.com/rust-lang/cargo/pull/8062) +- Added warnings if a package has Windows-restricted filenames (like `nul`, + `con`, `aux`, `prn`, etc.). + [#7959](https://github.com/rust-lang/cargo/pull/7959) +- Added a `"build-finished"` JSON message when compilation is complete so that + tools can detect when they can stop listening for JSON messages with + commands like `cargo run` or `cargo test`. + [#8069](https://github.com/rust-lang/cargo/pull/8069) + +### Changed +- Valid package names are now restricted to Unicode XID identifiers. This is + mostly the same as before, except package names cannot start with a number + or `-`. + [#7959](https://github.com/rust-lang/cargo/pull/7959) +- `cargo new` and `init` will warn or reject additional package names + (reserved Windows names, reserved Cargo directories, non-ASCII names, + conflicting std names like `core`, etc.). + [#7959](https://github.com/rust-lang/cargo/pull/7959) +- Tests are no longer hard-linked into the output directory (`target/debug/`). + This ensures tools will have access to debug symbols and execute tests in + the same way as Cargo. Tools should use JSON messages to discover the path + to the executable. + [#7965](https://github.com/rust-lang/cargo/pull/7965) +- Updating git submodules now displays an "Updating" message for each + submodule. + [#7989](https://github.com/rust-lang/cargo/pull/7989) +- File modification times are now preserved when extracting a `.crate` file. + This reverses the change made in 1.40 where the mtime was not preserved. + [#7935](https://github.com/rust-lang/cargo/pull/7935) +- Build script warnings are now displayed separately when the build script + fails. + [#8017](https://github.com/rust-lang/cargo/pull/8017) +- Removed the `git-checkout` subcommand. + [#8040](https://github.com/rust-lang/cargo/pull/8040) +- The progress bar is now enabled for all unix platforms. Previously it was + only Linux, macOS, and FreeBSD. + [#8054](https://github.com/rust-lang/cargo/pull/8054) +- Artifacts generated by pre-release versions of `rustc` now share the same + filenames. This means that changing nightly versions will not leave stale + files in the build directory. + [#8073](https://github.com/rust-lang/cargo/pull/8073) +- Invalid package names are rejected when using renamed dependencies. + [#8090](https://github.com/rust-lang/cargo/pull/8090) +- Added a certain class of HTTP2 errors as "spurious" that will get retried. + [#8102](https://github.com/rust-lang/cargo/pull/8102) +- Allow `cargo package --list` to succeed, even if there are other validation + errors (such as `Cargo.lock` generation problem, or missing dependencies). 
+ [#8175](https://github.com/rust-lang/cargo/pull/8175) + [#8215](https://github.com/rust-lang/cargo/pull/8215) + +### Fixed +- Cargo no longer buffers excessive amounts of compiler output in memory. + [#7838](https://github.com/rust-lang/cargo/pull/7838) +- Symbolic links in git repositories now work on Windows. + [#7996](https://github.com/rust-lang/cargo/pull/7996) +- Fixed an issue where `profile.dev` was not loaded from a config file with + `cargo test` when the `dev` profile was not defined in `Cargo.toml`. + [#8012](https://github.com/rust-lang/cargo/pull/8012) +- When a binary is built as an implicit dependency of an integration test, + it now checks `dep_name/feature_name` syntax in `required-features` correctly. + [#8020](https://github.com/rust-lang/cargo/pull/8020) +- Fixed an issue where Cargo would not detect that an executable (such as an + integration test) needs to be rebuilt when the previous build was + interrupted with Ctrl-C. + [#8087](https://github.com/rust-lang/cargo/pull/8087) +- Protect against some (unknown) situations where Cargo could panic when the + system monotonic clock doesn't appear to be monotonic. + [#8114](https://github.com/rust-lang/cargo/pull/8114) +- Fixed panic with `cargo clean -p` if the package has a build script. + [#8216](https://github.com/rust-lang/cargo/pull/8216) + +### Nightly only +- Fixed panic with new feature resolver and required-features. + [#7962](https://github.com/rust-lang/cargo/pull/7962) +- Added `RUSTC_WORKSPACE_WRAPPER` environment variable, which provides a way + to wrap `rustc` for workspace members only, and affects the filename hash so + that artifacts produced by the wrapper are cached separately. This usage can + be seen on nightly clippy with `cargo clippy -Zunstable-options`. + [#7533](https://github.com/rust-lang/cargo/pull/7533) +- Added `--unit-graph` CLI option to display Cargo's internal dependency graph + as JSON. + [#7977](https://github.com/rust-lang/cargo/pull/7977) +- Changed `-Zbuild_dep` to `-Zhost_dep`, and added proc-macros to the feature + decoupling logic. + [#8003](https://github.com/rust-lang/cargo/pull/8003) + [#8028](https://github.com/rust-lang/cargo/pull/8028) +- Fixed so that `--crate-version` is not automatically passed when the flag + is found in `RUSTDOCFLAGS`. + [#8014](https://github.com/rust-lang/cargo/pull/8014) +- Fixed panic with `-Zfeatures=dev_dep` and `check --profile=test`. + [#8027](https://github.com/rust-lang/cargo/pull/8027) +- Fixed panic with `-Zfeatures=itarget` with certain host dependencies. + [#8048](https://github.com/rust-lang/cargo/pull/8048) +- Added support for `-Cembed-bitcode=no`, which provides a performance boost + and disk-space usage reduction for non-LTO builds. + [#8066](https://github.com/rust-lang/cargo/pull/8066) +- `-Zpackage-features` has been extended with several changes intended to make + it easier to select features on the command-line in a workspace. + [#8074](https://github.com/rust-lang/cargo/pull/8074) + ## Cargo 1.43 (2020-04-23) -[9d32b7b0...HEAD](https://github.com/rust-lang/cargo/compare/9d32b7b0...HEAD) +[9d32b7b0...rust-1.43.0](https://github.com/rust-lang/cargo/compare/9d32b7b0...rust-1.43.0) ### Added - 🔥 Profiles may now be specified in config files (and environment variables). + [docs](https://doc.rust-lang.org/nightly/cargo/reference/config.html#profile) [#7823](https://github.com/rust-lang/cargo/pull/7823) +- ❗ Added `CARGO_BIN_EXE_` environment variable when building + integration tests. 
This variable contains the path to any `[[bin]]` targets + in the package. Integration tests should use the `env!` macro to determine + the path to a binary to execute. + [docs](https://doc.rust-lang.org/nightly/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-crates) + [#7697](https://github.com/rust-lang/cargo/pull/7697) ### Changed - `cargo install --git` now honors workspaces in a git repository. This allows workspace settings, like `[patch]`, `[replace]`, or `[profile]` to be used. [#7768](https://github.com/rust-lang/cargo/pull/7768) +- `cargo new` will now run `rustfmt` on the new files to pick up rustfmt + settings like `tab_spaces` so that the new file matches the user's preferred + indentation settings. + [#7827](https://github.com/rust-lang/cargo/pull/7827) +- Environment variables printed with "very verbose" output (`-vv`) are now + consistently sorted. + [#7877](https://github.com/rust-lang/cargo/pull/7877) +- Debug logging for fingerprint rebuild-detection now includes more information. + [#7888](https://github.com/rust-lang/cargo/pull/7888) + [#7890](https://github.com/rust-lang/cargo/pull/7890) + [#7952](https://github.com/rust-lang/cargo/pull/7952) +- Added warning during publish if the license-file doesn't exist. + [#7905](https://github.com/rust-lang/cargo/pull/7905) +- The `license-file` file is automatically included during publish, even if it + is not explicitly listed in the `include` list or is in a location outside + of the root of the package. + [#7905](https://github.com/rust-lang/cargo/pull/7905) +- `CARGO_CFG_DEBUG_ASSERTIONS` and `CARGO_CFG_PROC_MACRO` are no longer set + when running a build script. These were inadvertently set in the past, but + had no meaning as they were always true. Additionally, `cfg(proc-macro)` + is no longer supported in a `target` expression. + [#7943](https://github.com/rust-lang/cargo/pull/7943) + [#7970](https://github.com/rust-lang/cargo/pull/7970) ### Fixed +- Global command-line flags now work with aliases (like `cargo -v b`). + [#7837](https://github.com/rust-lang/cargo/pull/7837) +- Required-features using dependency syntax (like `renamed_dep/feat_name`) now + handle renamed dependencies correctly. + [#7855](https://github.com/rust-lang/cargo/pull/7855) +- Fixed a rare situation where if a build script is run multiple times during + the same build, Cargo will now keep the results separate instead of losing + the output of the first execution. + [#7857](https://github.com/rust-lang/cargo/pull/7857) +- Fixed incorrect interpretation of environment variable + `CARGO_TARGET_*_RUNNER=true` as a boolean. Also improved related env var + error messages. + [#7891](https://github.com/rust-lang/cargo/pull/7891) +- Updated internal libgit2 library, bringing various fixes to git support. + [#7939](https://github.com/rust-lang/cargo/pull/7939) +- `cargo package` / `cargo publish` should no longer buffer the entire + contents of each file in memory. + [#7946](https://github.com/rust-lang/cargo/pull/7946) +- Ignore more invalid `Cargo.toml` files in a git dependency. Cargo currently + walks the entire repo to find the requested package. Certain invalid + manifests were already skipped, and now it should skip all of them. + [#7947](https://github.com/rust-lang/cargo/pull/7947) ### Nightly only - Added `build.out-dir` config variable to set the output directory. @@ -20,8 +363,18 @@ - Added `-Zjobserver-per-rustc` feature to support improved performance for parallel rustc. 
[#7731](https://github.com/rust-lang/cargo/pull/7731) - - +- Fixed filename collision with `build-std` and crates like `cc`. + [#7860](https://github.com/rust-lang/cargo/pull/7860) +- `-Ztimings` will now save its report even if there is an error. + [#7872](https://github.com/rust-lang/cargo/pull/7872) +- Updated `--config` command-line flag to support taking a path to a config + file to load. + [#7901](https://github.com/rust-lang/cargo/pull/7901) +- Added new feature resolver. + [#7820](https://github.com/rust-lang/cargo/pull/7820) +- Rustdoc docs now automatically include the version of the package in the + side bar (requires `-Z crate-versions` flag). + [#7903](https://github.com/rust-lang/cargo/pull/7903) ## Cargo 1.42 (2020-03-12) [0bf7aafe...rust-1.42.0](https://github.com/rust-lang/cargo/compare/0bf7aafe...rust-1.42.0) @@ -92,7 +445,7 @@ ### Nightly only - `build-std` now uses `--extern` instead of `--sysroot` to find sysroot - pacakges. + packages. [#7699](https://github.com/rust-lang/cargo/pull/7699) - Added `--config` command-line option to set config settings. [#7649](https://github.com/rust-lang/cargo/pull/7649) @@ -1033,7 +1386,7 @@ [#5984](https://github.com/rust-lang/cargo/pull/5984) [#5989](https://github.com/rust-lang/cargo/pull/5989) - Added support for `target.'cfg(…)'.runner` config value to specify the - run/test/bench runner for config-expressioned targets. + run/test/bench runner for targets that use config expressions. [#5959](https://github.com/rust-lang/cargo/pull/5959) ### Changed diff -Nru cargo-0.44.1/ci/azure-install-rust.yml cargo-0.47.0/ci/azure-install-rust.yml --- cargo-0.44.1/ci/azure-install-rust.yml 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/ci/azure-install-rust.yml 2020-07-17 20:39:39.000000000 +0000 @@ -4,8 +4,8 @@ rustup set profile minimal rustup component remove --toolchain=$TOOLCHAIN rust-docs || echo "already removed" rustup update --no-self-update $TOOLCHAIN - if [ "$TOOLCHAIN" = "nightly" ]; then - rustup component add --toolchain=$TOOLCHAIN rustc-dev + if [[ "$TOOLCHAIN" == "nightly"* ]]; then + rustup component add --toolchain=$TOOLCHAIN rustc-dev llvm-tools-preview rust-docs fi rustup default $TOOLCHAIN displayName: Install rust diff -Nru cargo-0.44.1/ci/azure-test-all.yml cargo-0.47.0/ci/azure-test-all.yml --- cargo-0.44.1/ci/azure-test-all.yml 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/ci/azure-test-all.yml 2020-07-17 20:39:39.000000000 +0000 @@ -11,11 +11,6 @@ displayName: "Install gcc-multilib (linux)" condition: and(succeeded(), eq(variables['Agent.OS'], 'Linux')) -# Some tests rely on a clippy command to run, so let's try to install clippy to -# we can be sure to run those tests. -- bash: rustup component add clippy || echo "clippy not available" - displayName: "Install clippy (maybe)" - # Some tests also rely on rustfmt - bash: rustup component add rustfmt || echo "rustfmt not available" displayName: "Install rustfmt (maybe)" diff -Nru cargo-0.44.1/CONTRIBUTING.md cargo-0.47.0/CONTRIBUTING.md --- cargo-0.44.1/CONTRIBUTING.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/CONTRIBUTING.md 2020-07-17 20:39:39.000000000 +0000 @@ -6,7 +6,7 @@ issue tracker. If you have a general question about Cargo or it's internals, feel free to ask -on [Discord]. +on [Zulip]. ## Code of Conduct @@ -54,7 +54,7 @@ If you're looking for somewhere to start, check out the [E-easy][E-Easy] and [E-mentor][E-mentor] tags. 
-Feel free to ask for guidelines on how to tackle a problem on [Discord] or open a +Feel free to ask for guidelines on how to tackle a problem on [Zulip] or open a [new issue][new-issues]. This is especially important if you want to add new features to Cargo or make large changes to the already existing code-base. Cargo's core developers will do their best to provide help. @@ -118,10 +118,11 @@ `CFG_DISABLE_CROSS_TESTS=1` environment variable to disable these tests. The Windows cross tests only support the MSVC toolchain. -Some of the nightly tests require the `rustc-dev` component installed. This -component includes the compiler as a library. This may already be installed -with your nightly toolchain, but it if isn't, run `rustup component add -rustc-dev --toolchain=nightly`. +Some of the nightly tests require the `rustc-dev` and `llvm-tools-preview` +rustup components installed. These components include the compiler as a +library. This may already be installed with your nightly toolchain, but if it +isn't, run `rustup component add rustc-dev llvm-tools-preview +--toolchain=nightly`. There are several other packages in the repo for running specialized tests, and you will need to run these tests separately by changing into its directory @@ -144,7 +145,7 @@ After the pull request is made, a friendly bot will automatically assign a reviewer; the review-process will make sure that the proposed changes are sound. Please give the assigned reviewer sufficient time, especially during -weekends. If you don't get a reply, you may poke the core developers on [Discord]. +weekends. If you don't get a reply, you may poke the core developers on [Zulip]. A merge of Cargo's master-branch and your changes is immediately queued to be tested after the pull request is made. 
In case unforeseen @@ -225,7 +226,7 @@ [E-mentor]: https://github.com/rust-lang/cargo/labels/E-mentor [I-nominated]: https://github.com/rust-lang/cargo/labels/I-nominated [Code of Conduct]: https://www.rust-lang.org/conduct.html -[Discord]: https://discordapp.com/invite/rust-lang +[Zulip]: https://rust-lang.zulipchat.com/#narrow/stream/246057-t-cargo [`crates/cargo-test-support/src/lib.rs`]: crates/cargo-test-support/src/lib.rs [irlo]: https://internals.rust-lang.org/ [subcommands]: https://doc.rust-lang.org/cargo/reference/external-tools.html#custom-subcommands diff -Nru cargo-0.44.1/crates/cargo-platform/src/error.rs cargo-0.47.0/crates/cargo-platform/src/error.rs --- cargo-0.44.1/crates/cargo-platform/src/error.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-platform/src/error.rs 2020-07-17 20:39:39.000000000 +0000 @@ -6,6 +6,7 @@ orig: String, } +#[non_exhaustive] #[derive(Debug)] pub enum ParseErrorKind { UnterminatedString, @@ -17,9 +18,6 @@ IncompleteExpr(&'static str), UnterminatedExpression(String), InvalidTarget(String), - - #[doc(hidden)] - __Nonexhaustive, } impl fmt::Display for ParseError { @@ -53,7 +51,6 @@ write!(f, "unexpected content `{}` found after cfg expression", s) } InvalidTarget(s) => write!(f, "invalid target specifier: {}", s), - __Nonexhaustive => unreachable!(), } } } diff -Nru cargo-0.44.1/crates/cargo-test-support/Cargo.toml cargo-0.47.0/crates/cargo-test-support/Cargo.toml --- cargo-0.44.1/crates/cargo-test-support/Cargo.toml 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-test-support/Cargo.toml 2020-07-17 20:39:39.000000000 +0000 @@ -2,6 +2,7 @@ name = "cargo-test-support" version = "0.1.0" authors = ["Alex Crichton "] +license = "MIT OR Apache-2.0" edition = "2018" [lib] @@ -11,7 +12,7 @@ cargo = { path = "../.." 
} cargo-test-macro = { path = "../cargo-test-macro" } filetime = "0.2" -flate2 = "1.0" +flate2 = { version = "1.0", default-features = false, features = ["zlib"] } git2 = "0.13" glob = "0.3" lazy_static = "1.0" diff -Nru cargo-0.44.1/crates/cargo-test-support/src/cross_compile.rs cargo-0.47.0/crates/cargo-test-support/src/cross_compile.rs --- cargo-0.44.1/crates/cargo-test-support/src/cross_compile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-test-support/src/cross_compile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -190,6 +190,8 @@ "i686-unknown-linux-gnu" } else if cfg!(all(target_os = "windows", target_env = "msvc")) { "i686-pc-windows-msvc" + } else if cfg!(all(target_os = "windows", target_env = "gnu")) { + "i686-pc-windows-gnu" } else { panic!("This test should be gated on cross_compile::disabled."); } diff -Nru cargo-0.44.1/crates/cargo-test-support/src/git.rs cargo-0.47.0/crates/cargo-test-support/src/git.rs --- cargo-0.44.1/crates/cargo-test-support/src/git.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-test-support/src/git.rs 2020-07-17 20:39:39.000000000 +0000 @@ -39,8 +39,7 @@ */ use crate::{path2url, project, Project, ProjectBuilder}; -use std::fs::{self, File}; -use std::io::prelude::*; +use std::fs; use std::path::{Path, PathBuf}; use url::Url; @@ -81,7 +80,7 @@ pub fn nocommit_file(self, path: &str, contents: &str) -> RepoBuilder { let dst = self.repo.workdir().unwrap().join(path); t!(fs::create_dir_all(dst.parent().unwrap())); - t!(t!(File::create(&dst)).write_all(contents.as_bytes())); + t!(fs::write(&dst, contents)); self } diff -Nru cargo-0.44.1/crates/cargo-test-support/src/lib.rs cargo-0.47.0/crates/cargo-test-support/src/lib.rs --- cargo-0.44.1/crates/cargo-test-support/src/lib.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-test-support/src/lib.rs 2020-07-17 20:39:39.000000000 +0000 @@ -112,13 +112,11 @@ use std::ffi::OsStr; use std::fmt; use std::fs; -use std::io::prelude::*; use std::os; use std::path::{Path, PathBuf}; use std::process::{Command, Output}; use std::str; use std::time::{self, Duration}; -use std::usize; use cargo::util::{is_ci, CargoResult, ProcessBuilder, ProcessError, Rustc}; use serde_json::{self, Value}; @@ -166,11 +164,8 @@ fn mk(&self) { self.dirname().mkdir_p(); - - let mut file = fs::File::create(&self.path) + fs::write(&self.path, &self.body) .unwrap_or_else(|e| panic!("could not create file {}: {}", self.path.display(), e)); - - t!(file.write_all(self.body.as_bytes())); } fn dirname(&self) -> &Path { @@ -458,25 +453,15 @@ /// Returns the contents of a path in the project root pub fn read_file(&self, path: &str) -> String { - let mut buffer = String::new(); - fs::File::open(self.root().join(path)) - .unwrap() - .read_to_string(&mut buffer) - .unwrap(); - buffer + let full = self.root().join(path); + fs::read_to_string(&full) + .unwrap_or_else(|e| panic!("could not read file {}: {}", full.display(), e)) } /// Modifies `Cargo.toml` to remove all commented lines. 
pub fn uncomment_root_manifest(&self) { - let mut contents = String::new(); - fs::File::open(self.root().join("Cargo.toml")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); - fs::File::create(self.root().join("Cargo.toml")) - .unwrap() - .write_all(contents.replace("#", "").as_bytes()) - .unwrap(); + let contents = self.read_file("Cargo.toml").replace("#", ""); + fs::write(self.root().join("Cargo.toml"), contents).unwrap(); } pub fn symlink(&self, src: impl AsRef<Path>, dst: impl AsRef<Path>) { @@ -1230,6 +1215,14 @@ } MatchKind::Unordered => { let mut a = actual.lines().collect::<Vec<_>>(); + // match more-constrained lines first, although in theory we'll + // need some sort of recursive match here. This handles the case + // that you expect "a\n[..]b" and two lines are printed out, + // "ab\n"a", where technically we do match unordered but a naive + // search fails to find this. This simple sort at least gets the + // test suite to pass for now, but we may need to get more fancy + // if tests start failing again. + a.sort_by_key(|s| s.len()); let e = out.lines(); for e_line in e { @@ -1679,6 +1672,7 @@ pub static RUSTC: Rustc = Rustc::new( PathBuf::from("rustc"), None, + None, Path::new("should be path to rustup rustc, but we don't care in tests"), None, ).unwrap() @@ -1728,6 +1722,7 @@ .env_remove("RUSTDOC") .env_remove("RUSTC_WRAPPER") .env_remove("RUSTFLAGS") + .env_remove("RUSTDOCFLAGS") .env_remove("XDG_CONFIG_HOME") // see #2345 .env("GIT_CONFIG_NOSYSTEM", "1") // keep trying to sandbox ourselves .env_remove("EMAIL") @@ -1846,10 +1841,6 @@ } /// The error message for ENOENT. -/// -/// It's generally not good to match against OS error messages, but I think -/// this one is relatively stable. -#[cfg(windows)] -pub const NO_SUCH_FILE_ERR_MSG: &str = "The system cannot find the file specified.
(os error 2)"; -#[cfg(not(windows))] -pub const NO_SUCH_FILE_ERR_MSG: &str = "No such file or directory (os error 2)"; +pub fn no_such_file_err_msg() -> String { + std::io::Error::from_raw_os_error(2).to_string() +} diff -Nru cargo-0.44.1/crates/cargo-test-support/src/paths.rs cargo-0.47.0/crates/cargo-test-support/src/paths.rs --- cargo-0.44.1/crates/cargo-test-support/src/paths.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-test-support/src/paths.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,3 +1,4 @@ +use crate::{basic_manifest, project}; use filetime::{self, FileTime}; use lazy_static::lazy_static; use std::cell::RefCell; @@ -264,3 +265,24 @@ let sysroot = String::from_utf8(output.stdout).unwrap(); sysroot.trim().to_string() } + +pub fn echo_wrapper() -> std::path::PathBuf { + let p = project() + .at("rustc-echo-wrapper") + .file("Cargo.toml", &basic_manifest("rustc-echo-wrapper", "1.0.0")) + .file( + "src/main.rs", + r#" + fn main() { + let args = std::env::args().collect::>(); + eprintln!("WRAPPER CALLED: {}", args[1..].join(" ")); + let status = std::process::Command::new(&args[1]) + .args(&args[2..]).status().unwrap(); + std::process::exit(status.code().unwrap_or(1)); + } + "#, + ) + .build(); + p.cargo("build").run(); + p.bin("rustc-echo-wrapper") +} diff -Nru cargo-0.44.1/crates/cargo-test-support/src/registry.rs cargo-0.47.0/crates/cargo-test-support/src/registry.rs --- cargo-0.44.1/crates/cargo-test-support/src/registry.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/cargo-test-support/src/registry.rs 2020-07-17 20:39:39.000000000 +0000 @@ -144,6 +144,8 @@ local: bool, alternative: bool, invalid_json: bool, + proc_macro: bool, + links: Option, } #[derive(Clone)] @@ -161,36 +163,37 @@ pub fn init() { let config = paths::home().join(".cargo/config"); t!(fs::create_dir_all(config.parent().unwrap())); - if fs::metadata(&config).is_ok() { + if config.exists() { return; } - t!(t!(File::create(&config)).write_all( + t!(fs::write( + &config, format!( r#" - [source.crates-io] - registry = 'https://wut' - replace-with = 'dummy-registry' - - [source.dummy-registry] - registry = '{reg}' - - [registries.alternative] - index = '{alt}' - "#, + [source.crates-io] + registry = 'https://wut' + replace-with = 'dummy-registry' + + [source.dummy-registry] + registry = '{reg}' + + [registries.alternative] + index = '{alt}' + "#, reg = registry_url(), alt = alt_registry_url() ) - .as_bytes() )); let credentials = paths::home().join(".cargo/credentials"); - t!(t!(File::create(&credentials)).write_all( - br#" - [registry] - token = "api-token" - - [registries.alternative] - token = "api-token" - "# + t!(fs::write( + &credentials, + r#" + [registry] + token = "api-token" + + [registries.alternative] + token = "api-token" + "# )); // Initialize a new registry. @@ -242,6 +245,8 @@ local: false, alternative: false, invalid_json: false, + proc_macro: false, + links: None, } } @@ -345,6 +350,12 @@ self } + /// Specifies whether or not this is a proc macro. + pub fn proc_macro(&mut self, proc_macro: bool) -> &mut Package { + self.proc_macro = proc_macro; + self + } + /// Adds an entry in the `[features]` section. pub fn feature(&mut self, name: &str, deps: &[&str]) -> &mut Package { let deps = deps.iter().map(|s| s.to_string()).collect(); @@ -359,6 +370,11 @@ self } + pub fn links(&mut self, links: &str) -> &mut Package { + self.links = Some(links.to_string()); + self + } + /// Creates the package and place it in the registry. 
/// /// This does not actually use Cargo's publishing system, but instead @@ -375,14 +391,13 @@ .map(|dep| { // In the index, the `registry` is null if it is from the same registry. // In Cargo.toml, it is None if it is from crates.io. - let registry_url = - match (self.alternative, dep.registry.as_ref().map(|s| s.as_ref())) { - (false, None) => None, - (false, Some("alternative")) => Some(alt_registry_url().to_string()), - (true, None) => Some(CRATES_IO_INDEX.to_string()), - (true, Some("alternative")) => None, - _ => panic!("registry_dep currently only supports `alternative`"), - }; + let registry_url = match (self.alternative, dep.registry.as_deref()) { + (false, None) => None, + (false, Some("alternative")) => Some(alt_registry_url().to_string()), + (true, None) => Some(CRATES_IO_INDEX.to_string()), + (true, Some("alternative")) => None, + _ => panic!("registry_dep currently only supports `alternative`"), + }; serde_json::json!({ "name": dep.name, "req": dep.vers, @@ -397,8 +412,7 @@ }) .collect::>(); let cksum = { - let mut c = Vec::new(); - t!(t!(File::open(&self.archive_dst())).read_to_end(&mut c)); + let c = t!(fs::read(&self.archive_dst())); cksum(&c) }; let name = if self.invalid_json { @@ -413,6 +427,7 @@ "cksum": cksum, "features": self.features, "yanked": self.yanked, + "links": self.links, }) .to_string(); @@ -435,10 +450,9 @@ } else { registry_path.join(&file) }; - let mut prev = String::new(); - let _ = File::open(&dst).and_then(|mut f| f.read_to_string(&mut prev)); + let prev = fs::read_to_string(&dst).unwrap_or_default(); t!(fs::create_dir_all(dst.parent().unwrap())); - t!(t!(File::create(&dst)).write_all((prev + &line[..] + "\n").as_bytes())); + t!(fs::write(&dst, prev + &line[..] + "\n")); // Add the new file to the index. if !self.local { @@ -467,6 +481,27 @@ } fn make_archive(&self) { + let dst = self.archive_dst(); + t!(fs::create_dir_all(dst.parent().unwrap())); + let f = t!(File::create(&dst)); + let mut a = Builder::new(GzEncoder::new(f, Compression::default())); + + if !self.files.iter().any(|(name, _)| name == "Cargo.toml") { + self.append_manifest(&mut a); + } + if self.files.is_empty() { + self.append(&mut a, "src/lib.rs", ""); + } else { + for &(ref name, ref contents) in self.files.iter() { + self.append(&mut a, name, contents); + } + } + for &(ref name, ref contents) in self.extra_files.iter() { + self.append_extra(&mut a, name, contents); + } + } + + fn append_manifest(&self, ar: &mut Builder) { let mut manifest = format!( r#" [package] @@ -498,22 +533,11 @@ manifest.push_str(&format!("registry-index = \"{}\"", alt_registry_url())); } } - - let dst = self.archive_dst(); - t!(fs::create_dir_all(dst.parent().unwrap())); - let f = t!(File::create(&dst)); - let mut a = Builder::new(GzEncoder::new(f, Compression::default())); - self.append(&mut a, "Cargo.toml", &manifest); - if self.files.is_empty() { - self.append(&mut a, "src/lib.rs", ""); - } else { - for &(ref name, ref contents) in self.files.iter() { - self.append(&mut a, name, contents); - } - } - for &(ref name, ref contents) in self.extra_files.iter() { - self.append_extra(&mut a, name, contents); + if self.proc_macro { + manifest.push_str("[lib]\nproc-macro = true\n"); } + + self.append(ar, "Cargo.toml", &manifest); } fn append(&self, ar: &mut Builder, file: &str, contents: &str) { diff -Nru cargo-0.44.1/crates/crates-io/Cargo.toml cargo-0.47.0/crates/crates-io/Cargo.toml --- cargo-0.44.1/crates/crates-io/Cargo.toml 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/crates-io/Cargo.toml 
2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,6 @@ [package] name = "crates-io" -version = "0.31.0" +version = "0.31.1" edition = "2018" authors = ["Alex Crichton "] license = "MIT OR Apache-2.0" @@ -18,6 +18,5 @@ anyhow = "1.0.0" percent-encoding = "2.0" serde = { version = "1.0", features = ['derive'] } -serde_derive = "1.0" serde_json = "1.0" url = "2.0" diff -Nru cargo-0.44.1/crates/crates-io/lib.rs cargo-0.47.0/crates/crates-io/lib.rs --- cargo-0.44.1/crates/crates-io/lib.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/crates-io/lib.rs 2020-07-17 20:39:39.000000000 +0000 @@ -54,7 +54,6 @@ pub license_file: Option, pub repository: Option, pub badges: BTreeMap>, - #[serde(default)] pub links: Option, } @@ -140,9 +139,7 @@ } pub fn host_is_crates_io(&self) -> bool { - Url::parse(self.host()) - .map(|u| u.host_str() == Some("crates.io")) - .unwrap_or(false) + is_url_crates_io(&self.host) } pub fn add_owners(&mut self, krate: &str, owners: &[&str]) -> Result { @@ -421,3 +418,10 @@ _ => "", } } + +/// Returns `true` if the host of the given URL is "crates.io". +pub fn is_url_crates_io(url: &str) -> bool { + Url::parse(url) + .map(|u| u.host_str() == Some("crates.io")) + .unwrap_or(false) +} diff -Nru cargo-0.44.1/crates/resolver-tests/src/lib.rs cargo-0.47.0/crates/resolver-tests/src/lib.rs --- cargo-0.44.1/crates/resolver-tests/src/lib.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/resolver-tests/src/lib.rs 2020-07-17 20:39:39.000000000 +0000 @@ -734,8 +734,8 @@ "vec![pkg!((\"foo\", \"1.0.1\") => [dep_req(\"bar\", \"^1\"),]),\ pkg!((\"foo\", \"1.0.0\") => [dep_req(\"bar\", \"^2\"),]),\ pkg!((\"foo\", \"2.0.0\") => [dep(\"bar\"),]),\ - pkg!((\"bar\", \"1.0.0\") => [dep_req(\"baz\", \"= 1.0.2\"),dep_req(\"other\", \"^1\"),]),\ - pkg!((\"bar\", \"2.0.0\") => [dep_req(\"baz\", \"= 1.0.1\"),]),\ + pkg!((\"bar\", \"1.0.0\") => [dep_req(\"baz\", \"=1.0.2\"),dep_req(\"other\", \"^1\"),]),\ + pkg!((\"bar\", \"2.0.0\") => [dep_req(\"baz\", \"=1.0.1\"),]),\ pkg!((\"baz\", \"1.0.2\") => [dep_req(\"other\", \"^2\"),]),\ pkg!((\"baz\", \"1.0.1\")),\ pkg!((\"cat\", \"1.0.2\") => [dep_req_kind(\"other\", \"^2\", DepKind::Build, false),]),\ diff -Nru cargo-0.44.1/crates/resolver-tests/tests/resolve.rs cargo-0.47.0/crates/resolver-tests/tests/resolve.rs --- cargo-0.44.1/crates/resolver-tests/tests/resolve.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/crates/resolver-tests/tests/resolve.rs 2020-07-17 20:39:39.000000000 +0000 @@ -25,7 +25,7 @@ 0 } else { // but that local builds will give a small clear test case. - std::u32::MAX + u32::MAX }, result_cache: prop::test_runner::basic_result_cache, .. 
ProptestConfig::default() @@ -1459,6 +1459,31 @@ } #[test] +fn bad_lockfile_from_8249() { + let input = vec![ + pkg!(("a-sys", "0.2.0")), + pkg!(("a-sys", "0.1.0")), + pkg!(("b", "0.1.0") => [ + dep_req("a-sys", "0.1"), // should be optional: true, but not needed for now + ]), + pkg!(("c", "1.0.0") => [ + dep_req("b", "=0.1.0"), + ]), + pkg!("foo" => [ + dep_req("a-sys", "=0.2.0"), + { + let mut b = dep_req("b", "=0.1.0"); + b.set_features(vec!["a-sys"]); + b + }, + dep_req("c", "=1.0.0"), + ]), + ]; + let reg = registry(input); + let _ = resolve_and_validated(vec![dep("foo")], &reg, None); +} + +#[test] fn cyclic_good_error_message() { let input = vec![ pkg!(("A", "0.0.0") => [dep("C")]), diff -Nru cargo-0.44.1/debian/changelog cargo-0.47.0/debian/changelog --- cargo-0.44.1/debian/changelog 2020-06-24 19:53:54.000000000 +0000 +++ cargo-0.47.0/debian/changelog 2020-12-10 07:43:51.000000000 +0000 @@ -1,9 +1,37 @@ -cargo (0.44.1-0ubuntu1~18.04.1) bionic; urgency=medium +cargo (0.47.0-1~exp1ubuntu1~18.04.1) bionic; urgency=medium - * Backport to Bionic. (LP: #1876942) + * Backport to Bionic. (LP: #1901571) * Relax debhelper requirement. - -- Michael Hudson-Doyle Thu, 25 Jun 2020 07:53:54 +1200 + -- Michael Hudson-Doyle Thu, 10 Dec 2020 20:43:51 +1300 + +cargo (0.47.0-1~exp1ubuntu1) hirsute; urgency=medium + + * Merge from Debian experimental (LP: #1901571): Remaining changes: + - Don't use the bootstrap.py script for bootstrapping as it no longer + works. + - remove debian/bootstrap.py + - update debian/make_orig_multi.sh + - Embed libgit2 1.0.0 which is not yet in Debian or Ubuntu. + - add debian/libgit2 + - add debian/patches/use-system-libhttp-parser.patch + - update debian/control + - update debian/copyright + - update debian/patches/series + - update debian/README.source + - update debian/rules + * d/patches/0001-relax-deprecated-diagnostic-message-check.patch: + backport fix for tests that fail with rustc 1.47 from upstream. + * d/patches/skip-filters_target-i386.patch: skip a test that fails on + i386 for silly reasons. + + -- Michael Hudson-Doyle Wed, 09 Dec 2020 10:58:12 +1300 + +cargo (0.47.0-1~exp1) experimental; urgency=medium + + * New upstream release.
+ + -- Ximin Luo Fri, 02 Oct 2020 02:04:10 +0100 cargo (0.44.1-0ubuntu1) groovy; urgency=medium diff -Nru cargo-0.44.1/debian/copyright cargo-0.47.0/debian/copyright --- cargo-0.44.1/debian/copyright 2020-05-27 22:37:10.000000000 +0000 +++ cargo-0.47.0/debian/copyright 2020-12-03 20:53:35.000000000 +0000 @@ -17,6 +17,7 @@ azure-pipelines.yml ci/* publish.py + triagebot.toml Copyright: 2014 The Rust Project Developers License: MIT or Apache-2.0 Comment: please do not add * to the above paragraph, so we can use lintian to @@ -136,6 +137,11 @@ License: MIT OR Apache-2.0 Comment: see https://github.com/dtolnay/anyhow +Files: vendor/adler/* +Copyright: 2020-2020 Jonas Schievink +License: 0BSD OR MIT OR Apache-2.0 +Comment: see https://github.com/jonas-schievink/adler.git + Files: vendor/adler32/* Copyright: 2015-2019 Remi Rampin License: Zlib @@ -495,6 +501,11 @@ License: MIT or Apache-2.0 Comment: see https://github.com/Amanieu/thread_local-rs +Files: vendor/tinyvec/* +Copyright: 2020-2020 Lokathor +License: Zlib OR Apache-2.0 OR MIT +Comment: see https://github.com/Lokathor/tinyvec + Files: vendor/typenum/* Copyright: 2015-2018 Paho Lurie-Gregg 2015-2018 Andre Bogus @@ -576,6 +587,18 @@ License: BSD-2-clause Comment: See LICENSE at https://github.com/dhuseby/cargo-bootstrap/ +License: 0BSD + Permission to use, copy, modify, and/or distribute this software for + any purpose with or without fee is hereby granted. + . + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + License: Apache-2.0 On Debian systems, see /usr/share/common-licenses/Apache-2.0 for the full text of the Apache License version 2. diff -Nru cargo-0.44.1/debian/debcargo-conf.patch cargo-0.47.0/debian/debcargo-conf.patch --- cargo-0.44.1/debian/debcargo-conf.patch 2020-05-27 21:14:16.000000000 +0000 +++ cargo-0.47.0/debian/debcargo-conf.patch 2020-12-03 20:49:58.000000000 +0000 @@ -1,173 +1,5 @@ -diff --git a/src/cc/debian/patches/clear-cflags-and-cxxflags-before-tests.diff b/src/cc/debian/patches/clear-cflags-and-cxxflags-before-tests.diff -deleted file mode 100644 -index 5ef5c4b4..00000000 ---- a/src/cc/debian/patches/clear-cflags-and-cxxflags-before-tests.diff -+++ /dev/null -@@ -1,156 +0,0 @@ --From b3efc44c3d18cec312594040f0f09f7d39f00832 Mon Sep 17 00:00:00 2001 --From: Robin Krahl --Date: Mon, 27 Jan 2020 20:59:52 +0100 --Subject: [PATCH] Clear CFLAGS and CXXFLAGS before tests (#472) -- --Some test cases check that a compiler flag is not present. But --cc::Build loads additional flags from the CFLAGS and CXXFLAGS --environment variables. If these are set, they might interfere with the --test cases. -- --Therefore we clear the CFLAGS and CXXFLAGS environment variables before --running a test that requires an absent flag. ----- -- tests/test.rs | 36 ++++++++++++++++++++++++++++++++++++ -- 1 file changed, 36 insertions(+) -- --diff --git a/tests/test.rs b/tests/test.rs --index def11f02..3c9b4dc4 100644 ----- a/tests/test.rs --+++ b/tests/test.rs --@@ -2,8 +2,18 @@ use crate::support::Test; -- -- mod support; -- --+// Some tests check that a flag is *not* present. 
These tests might fail if the flag is set in the --+// CFLAGS or CXXFLAGS environment variables. This function clears the CFLAGS and CXXFLAGS --+// variables to make sure that the tests can run correctly. --+fn reset_env() { --+ std::env::set_var("CFLAGS", ""); --+ std::env::set_var("CXXFLAGS", ""); --+} --+ -- #[test] -- fn gnu_smoke() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc().file("foo.c").compile("foo"); -- --@@ -19,6 +29,8 @@ fn gnu_smoke() { -- -- #[test] -- fn gnu_opt_level_1() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc().opt_level(1).file("foo.c").compile("foo"); -- --@@ -27,6 +39,8 @@ fn gnu_opt_level_1() { -- -- #[test] -- fn gnu_opt_level_s() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc().opt_level_str("s").file("foo.c").compile("foo"); -- --@@ -56,6 +70,8 @@ fn gnu_debug_fp() { -- -- #[test] -- fn gnu_debug_nofp() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .debug(true) --@@ -100,6 +116,8 @@ fn gnu_warnings() { -- -- #[test] -- fn gnu_extra_warnings0() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .warnings(true) --@@ -113,6 +131,8 @@ fn gnu_extra_warnings0() { -- -- #[test] -- fn gnu_extra_warnings1() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .warnings(false) --@@ -126,6 +146,8 @@ fn gnu_extra_warnings1() { -- -- #[test] -- fn gnu_warnings_overridable() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .warnings(true) --@@ -154,6 +176,8 @@ fn gnu_x86_64() { -- -- #[test] -- fn gnu_x86_64_no_pic() { --+ reset_env(); --+ -- for vendor in &["unknown-linux-gnu", "apple-darwin"] { -- let target = format!("x86_64-{}", vendor); -- let test = Test::gnu(); --@@ -215,6 +239,8 @@ fn gnu_x86_64_no_plt() { -- -- #[test] -- fn gnu_set_stdlib() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .cpp_set_stdlib(Some("foo")) --@@ -253,6 +279,8 @@ fn gnu_compile_assembly() { -- -- #[test] -- fn gnu_shared() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .file("foo.c") --@@ -265,6 +293,8 @@ fn gnu_shared() { -- -- #[test] -- fn gnu_flag_if_supported() { --+ reset_env(); --+ -- if cfg!(windows) { -- return; -- } --@@ -301,6 +331,8 @@ fn gnu_flag_if_supported_cpp() { -- -- #[test] -- fn gnu_static() { --+ reset_env(); --+ -- let test = Test::gnu(); -- test.gcc() -- .file("foo.c") --@@ -313,6 +345,8 @@ fn gnu_static() { -- -- #[test] -- fn msvc_smoke() { --+ reset_env(); --+ -- let test = Test::msvc(); -- test.gcc().file("foo.c").compile("foo"); -- --@@ -327,6 +361,8 @@ fn msvc_smoke() { -- -- #[test] -- fn msvc_opt_level_0() { --+ reset_env(); --+ -- let test = Test::msvc(); -- test.gcc().opt_level(0).file("foo.c").compile("foo"); -- -diff --git a/src/cc/debian/patches/series b/src/cc/debian/patches/series -index 41efdba2..e69de29b 100644 ---- a/src/cc/debian/patches/series -+++ b/src/cc/debian/patches/series -@@ -1 +0,0 @@ --clear-cflags-and-cxxflags-before-tests.diff diff --git a/src/commoncrypto-sys/debian/patches/no-clippy.patch b/src/commoncrypto-sys/debian/patches/no-clippy.patch -index 3e4c8850..93789f3b 100644 +index 3e4c885..93789f3 100644 --- a/src/commoncrypto-sys/debian/patches/no-clippy.patch +++ b/src/commoncrypto-sys/debian/patches/no-clippy.patch @@ -1,17 +1,16 @@ @@ -200,7 +32,7 @@ + [dev-dependencies] + hex = "0.2" diff --git a/src/commoncrypto/debian/patches/no-clippy.patch b/src/commoncrypto/debian/patches/no-clippy.patch -index 38d9c925..b21a7cae 100644 +index 38d9c92..b21a7ca 100644 --- 
a/src/commoncrypto/debian/patches/no-clippy.patch +++ b/src/commoncrypto/debian/patches/no-clippy.patch @@ -1,17 +1,16 @@ @@ -232,77 +64,8 @@ +- + [dev-dependencies] + hex = "0.2" -diff --git a/src/libgit2-sys/debian/patches/abi-compat-0.28.3.patch b/src/libgit2-sys/debian/patches/abi-compat-0.28.3.patch -deleted file mode 100644 -index 43c1c974..00000000 ---- a/src/libgit2-sys/debian/patches/abi-compat-0.28.3.patch -+++ /dev/null -@@ -1,10 +0,0 @@ ----- a/lib.rs --+++ b/lib.rs --@@ -331,7 +331,6 @@ -- pub push_negotiation: Option, -- pub transport: Option, -- pub payload: *mut c_void, --- pub resolve_url: Option, -- } -- -- #[repr(C)] -diff --git a/src/libgit2-sys/debian/patches/no-special-snowflake-env.patch b/src/libgit2-sys/debian/patches/no-special-snowflake-env.patch -index d9d3a918..40586d32 100644 ---- a/src/libgit2-sys/debian/patches/no-special-snowflake-env.patch -+++ b/src/libgit2-sys/debian/patches/no-special-snowflake-env.patch -@@ -1,16 +1,7 @@ - --- a/build.rs - +++ b/build.rs --@@ -7,7 +7,7 @@ -- let https = env::var("CARGO_FEATURE_HTTPS").is_ok(); -- let ssh = env::var("CARGO_FEATURE_SSH").is_ok(); -- --- if env::var("LIBGIT2_SYS_USE_PKG_CONFIG").is_ok() { --+ if true { -- let mut cfg = pkg_config::Config::new(); -- if let Ok(lib) = cfg.atleast_version("0.28.0").probe("libgit2") { -- for include in &lib.include_paths { --@@ -17,7 +17,7 @@ -- } -+@@ -15,7 +15,7 @@ -+ return; - } - - - if !Path::new("libgit2/.git").exists() { -diff --git a/src/libgit2-sys/debian/patches/series b/src/libgit2-sys/debian/patches/series -index 54a9d187..206beed4 100644 ---- a/src/libgit2-sys/debian/patches/series -+++ b/src/libgit2-sys/debian/patches/series -@@ -1,2 +1 @@ - no-special-snowflake-env.patch --abi-compat-0.28.3.patch -diff --git a/src/libssh2-sys/debian/patches/no-special-snowflake-env.patch b/src/libssh2-sys/debian/patches/no-special-snowflake-env.patch -index 46dceb51..6c0a6c10 100644 ---- a/src/libssh2-sys/debian/patches/no-special-snowflake-env.patch -+++ b/src/libssh2-sys/debian/patches/no-special-snowflake-env.patch -@@ -1,15 +1,16 @@ - --- a/build.rs - +++ b/build.rs --@@ -17,7 +17,7 @@ -+@@ -17,8 +17,7 @@ - // The system copy of libssh2 is not used by default because it - // can lead to having two copies of libssl loaded at once. 
- // See https://github.com/alexcrichton/ssh2-rs/pull/88 -+- println!("cargo:rerun-if-env-changed=LIBSSH2_SYS_USE_PKG_CONFIG"); - - if env::var("LIBSSH2_SYS_USE_PKG_CONFIG").is_ok() { - + if true { - if let Ok(lib) = pkg_config::find_library("libssh2") { - for path in &lib.include_paths { - println!("cargo:include={}", path.display()); --@@ -26,7 +26,7 @@ -+@@ -27,7 +26,7 @@ - } - } - diff --git a/src/unicode-bidi/debian/patches/no-flamegraphs.patch b/src/unicode-bidi/debian/patches/no-flamegraphs.patch -index 6234d5da..9acab863 100644 +index 6234d5d..9acab86 100644 --- a/src/unicode-bidi/debian/patches/no-flamegraphs.patch +++ b/src/unicode-bidi/debian/patches/no-flamegraphs.patch @@ -1,5 +1,5 @@ diff -Nru cargo-0.44.1/debian/patches/0001-relax-deprecated-diagnostic-message-check.patch cargo-0.47.0/debian/patches/0001-relax-deprecated-diagnostic-message-check.patch --- cargo-0.44.1/debian/patches/0001-relax-deprecated-diagnostic-message-check.patch 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/debian/patches/0001-relax-deprecated-diagnostic-message-check.patch 2020-12-08 21:58:04.000000000 +0000 @@ -0,0 +1,39 @@ +From 537a0202679b9ccd2a951d2695a2ad7b18b16415 Mon Sep 17 00:00:00 2001 +From: Andy Russell +Date: Thu, 30 Jul 2020 21:04:22 -0400 +Subject: [PATCH] relax deprecated diagnostic message check + +--- + tests/testsuite/fix.rs | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/tests/testsuite/fix.rs b/tests/testsuite/fix.rs +index 3a293cfc2..c54cd77c0 100644 +--- a/tests/testsuite/fix.rs ++++ b/tests/testsuite/fix.rs +@@ -686,7 +686,7 @@ fn shows_warnings() { + .build(); + + p.cargo("fix --allow-no-vcs") +- .with_stderr_contains("[..]warning: use of deprecated item[..]") ++ .with_stderr_contains("[..]warning: use of deprecated[..]") + .run(); + } + +@@ -929,11 +929,11 @@ fn shows_warnings_on_second_run_without_changes() { + .build(); + + p.cargo("fix --allow-no-vcs") +- .with_stderr_contains("[..]warning: use of deprecated item[..]") ++ .with_stderr_contains("[..]warning: use of deprecated[..]") + .run(); + + p.cargo("fix --allow-no-vcs") +- .with_stderr_contains("[..]warning: use of deprecated item[..]") ++ .with_stderr_contains("[..]warning: use of deprecated[..]") + .run(); + } + +-- +2.25.1 + diff -Nru cargo-0.44.1/debian/patches/2002_disable-net-tests.patch cargo-0.47.0/debian/patches/2002_disable-net-tests.patch --- cargo-0.44.1/debian/patches/2002_disable-net-tests.patch 2020-05-27 21:31:02.000000000 +0000 +++ cargo-0.47.0/debian/patches/2002_disable-net-tests.patch 2020-12-03 20:49:58.000000000 +0000 @@ -5,16 +5,16 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ --- a/tests/testsuite/git_auth.rs +++ b/tests/testsuite/git_auth.rs -@@ -12,7 +12,7 @@ - use cargo_test_support::{basic_manifest, project}; +@@ -102,7 +102,7 @@ + } // Tests that HTTP auth is offered from `credential.helper`. -#[cargo_test] +#[allow(dead_code)] fn http_auth_offered() { - let server = TcpListener::bind("127.0.0.1:0").unwrap(); - let addr = server.local_addr().unwrap(); -@@ -159,7 +159,7 @@ + let (addr, t, connections) = setup_failed_auth_test(); + let p = project() +@@ -167,7 +167,7 @@ } // Boy, sure would be nice to have a TLS implementation in rust! 
diff -Nru cargo-0.44.1/debian/patches/2003_disable_unicode_xid_favicon.patch cargo-0.47.0/debian/patches/2003_disable_unicode_xid_favicon.patch --- cargo-0.44.1/debian/patches/2003_disable_unicode_xid_favicon.patch 2020-05-27 21:31:03.000000000 +0000 +++ cargo-0.47.0/debian/patches/2003_disable_unicode_xid_favicon.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ ---- a/vendor/unicode-xid/src/lib.rs -+++ b/vendor/unicode-xid/src/lib.rs -@@ -38,8 +38,6 @@ - //! ``` - - #![deny(missing_docs, unsafe_code)] --#![doc(html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png", -- html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png")] - - #![no_std] - #![cfg_attr(feature = "bench", feature(test, unicode_internals))] diff -Nru cargo-0.44.1/debian/patches/series cargo-0.47.0/debian/patches/series --- cargo-0.44.1/debian/patches/series 2020-06-24 19:53:48.000000000 +0000 +++ cargo-0.47.0/debian/patches/series 2020-12-08 21:58:04.000000000 +0000 @@ -1,4 +1,5 @@ 2002_disable-net-tests.patch -2003_disable_unicode_xid_favicon.patch 2005_disable_fetch_cross_tests.patch use-system-libhttp-parser.patch +0001-relax-deprecated-diagnostic-message-check.patch +skip-filters_target-i386.patch diff -Nru cargo-0.44.1/debian/patches/skip-filters_target-i386.patch cargo-0.47.0/debian/patches/skip-filters_target-i386.patch --- cargo-0.44.1/debian/patches/skip-filters_target-i386.patch 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/debian/patches/skip-filters_target-i386.patch 2020-12-08 21:58:04.000000000 +0000 @@ -0,0 +1,10 @@ +--- a/tests/testsuite/tree.rs ++++ b/tests/testsuite/tree.rs +@@ -322,6 +322,7 @@ + } + + #[cargo_test] ++#[cfg(not(target_arch = "x86"))] + fn filters_target() { + // --target flag + Package::new("targetdep", "1.0.0").publish(); diff -Nru cargo-0.44.1/debian/rules cargo-0.47.0/debian/rules --- cargo-0.44.1/debian/rules 2020-05-27 22:37:10.000000000 +0000 +++ cargo-0.47.0/debian/rules 2020-12-03 20:53:35.000000000 +0000 @@ -55,3 +55,4 @@ override_dh_clean: # Upstream contains a lot of these dh_clean -XCargo.toml.orig + rm -f Cargo.lock diff -Nru cargo-0.44.1/debian/scripts/audit-vendor-source cargo-0.47.0/debian/scripts/audit-vendor-source --- cargo-0.44.1/debian/scripts/audit-vendor-source 2020-02-29 14:30:20.000000000 +0000 +++ cargo-0.47.0/debian/scripts/audit-vendor-source 2020-12-03 20:49:58.000000000 +0000 @@ -2,6 +2,9 @@ # Audit Rust crate source for suspicious files in the current directory, that # shouldn't or can't be part of a Debian source package. # +# NOTE: this overwrites & deletes files in the current directory!!! Make a +# backup before running this script. +# # Usage: $0 [] set -e @@ -18,6 +21,9 @@ echo "Checking for suspicious files..." # Remove cargo metadata files find . '(' -name '.cargo-checksum.json' -or -name '.cargo_vcs_info.json' ')' -delete +# Strip comments & blank lines before testing rust source code - +# some authors like to write really long comments +find . -name '*.rs' -execdir sed -i -e '\,^\s*//,d' -e '/^\s*$/d' '{}' \; # TODO: merge the -m stuff into suspicious-source(1). 
suspicious-source -v "$@" diff -Nru cargo-0.44.1/debian/vendor-tarball-unsuspicious.txt cargo-0.47.0/debian/vendor-tarball-unsuspicious.txt --- cargo-0.44.1/debian/vendor-tarball-unsuspicious.txt 2020-05-15 01:04:49.000000000 +0000 +++ cargo-0.47.0/debian/vendor-tarball-unsuspicious.txt 2020-12-03 20:49:58.000000000 +0000 @@ -28,6 +28,7 @@ # ideally should be autogenerated, but too difficult today bstr/src/unicode/fsm/*.dfa +regex-syntax/src/unicode_tables/*.rs # "verylongtext" but OK source code, manually audited: cloudabi/cloudabi.rs diff -Nru cargo-0.44.1/src/bin/cargo/cli.rs cargo-0.47.0/src/bin/cargo/cli.rs --- cargo-0.44.1/src/bin/cargo/cli.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/cli.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,5 +1,5 @@ use cargo::core::features; -use cargo::{self, CliResult, Config}; +use cargo::{self, drop_print, drop_println, CliResult, Config}; use clap::{AppSettings, Arg, ArgMatches}; use super::commands; @@ -25,7 +25,8 @@ }; if args.value_of("unstable-features") == Some("help") { - println!( + drop_println!( + config, " Available unstable (nightly-only) flags: @@ -40,7 +41,8 @@ Run with 'cargo -Z [FLAG] [SUBCOMMAND]'" ); if !features::nightly_features_allowed() { - println!( + drop_println!( + config, "\nUnstable flags are only available on the nightly channel \ of Cargo, but this is the `{}` channel.\n\ {}", @@ -48,7 +50,8 @@ features::SEE_CHANNELS ); } - println!( + drop_println!( + config, "\nSee https://doc.rust-lang.org/nightly/cargo/reference/unstable.html \ for more information about these flags." ); @@ -58,7 +61,7 @@ let is_verbose = args.occurrences_of("verbose") > 0; if args.is_present("version") { let version = get_version_string(is_verbose); - print!("{}", version); + drop_print!(config, "{}", version); return Ok(()); } @@ -69,19 +72,19 @@ } if args.is_present("list") { - println!("Installed Commands:"); + drop_println!(config, "Installed Commands:"); for command in list_commands(config) { match command { CommandInfo::BuiltIn { name, about } => { let summary = about.unwrap_or_default(); let summary = summary.lines().next().unwrap_or(&summary); // display only the first line - println!(" {:<20} {}", name, summary) + drop_println!(config, " {:<20} {}", name, summary); } CommandInfo::External { name, path } => { if is_verbose { - println!(" {:<20} {}", name, path.display()) + drop_println!(config, " {:<20} {}", name, path.display()); } else { - println!(" {}", name) + drop_println!(config, " {}", name); } } } @@ -179,9 +182,7 @@ let quiet = args.is_present("quiet") || subcommand_args.is_present("quiet") || global_args.quiet; let global_color = global_args.color; // Extract so it can take reference. 
- let color = args - .value_of("color") - .or_else(|| global_color.as_ref().map(|s| s.as_ref())); + let color = args.value_of("color").or_else(|| global_color.as_deref()); let frozen = args.is_present("frozen") || global_args.frozen; let locked = args.is_present("locked") || global_args.locked; let offline = args.is_present("offline") || global_args.offline; @@ -255,6 +256,12 @@ } fn cli() -> App { + let is_rustup = std::env::var_os("RUSTUP_HOME").is_some(); + let usage = if is_rustup { + "cargo [+toolchain] [OPTIONS] [SUBCOMMAND]" + } else { + "cargo [OPTIONS] [SUBCOMMAND]" + }; App::new("cargo") .settings(&[ AppSettings::UnifiedHelpMessage, @@ -262,6 +269,7 @@ AppSettings::VersionlessSubcommands, AppSettings::AllowExternalSubcommands, ]) + .usage(usage) .template( "\ Rust's package manager @@ -273,14 +281,14 @@ {unified} Some common cargo commands are (see all commands with --list): - build Compile the current package - check Analyze the current package and report errors, but don't build object files + build, b Compile the current package + check, c Analyze the current package and report errors, but don't build object files clean Remove the target directory doc Build this package's and its dependencies' documentation new Create a new cargo package init Create a new cargo package in an existing directory - run Run a binary or example of the local package - test Run the tests + run, r Run a binary or example of the local package + test, t Run the tests bench Run the benchmarks update Update dependencies listed in Cargo.lock search Search registry for crates diff -Nru cargo-0.44.1/src/bin/cargo/commands/bench.rs cargo-0.47.0/src/bin/cargo/commands/bench.rs --- cargo-0.44.1/src/bin/cargo/commands/bench.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/bench.rs 2020-07-17 20:39:39.000000000 +0000 @@ -44,6 +44,7 @@ "no-fail-fast", "Run all benchmarks regardless of failure", )) + .arg_unit_graph() .after_help( "\ The benchmark filtering argument BENCHNAME and all the arguments following the diff -Nru cargo-0.44.1/src/bin/cargo/commands/build.rs cargo-0.47.0/src/bin/cargo/commands/build.rs --- cargo-0.44.1/src/bin/cargo/commands/build.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/build.rs 2020-07-17 20:39:39.000000000 +0000 @@ -41,6 +41,7 @@ .arg_manifest_path() .arg_message_format() .arg_build_plan() + .arg_unit_graph() .after_help( "\ All packages in the workspace are built if the `--workspace` flag is supplied. 
The @@ -64,12 +65,12 @@ )?; if let Some(out_dir) = args.value_of_path("out-dir", config) { - compile_opts.export_dir = Some(out_dir); + compile_opts.build_config.export_dir = Some(out_dir); } else if let Some(out_dir) = config.build_config()?.out_dir.as_ref() { let out_dir = out_dir.resolve_path(config); - compile_opts.export_dir = Some(out_dir); + compile_opts.build_config.export_dir = Some(out_dir); } - if compile_opts.export_dir.is_some() { + if compile_opts.build_config.export_dir.is_some() { config .cli_unstable() .fail_if_stable_opt("--out-dir", 6790)?; diff -Nru cargo-0.44.1/src/bin/cargo/commands/check.rs cargo-0.47.0/src/bin/cargo/commands/check.rs --- cargo-0.44.1/src/bin/cargo/commands/check.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/check.rs 2020-07-17 20:39:39.000000000 +0000 @@ -33,6 +33,7 @@ .arg_target_dir() .arg_manifest_path() .arg_message_format() + .arg_unit_graph() .after_help( "\ If the `--package` argument is given, then SPEC is a package ID specification diff -Nru cargo-0.44.1/src/bin/cargo/commands/clean.rs cargo-0.47.0/src/bin/cargo/commands/clean.rs --- cargo-0.44.1/src/bin/cargo/commands/clean.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/clean.rs 2020-07-17 20:39:39.000000000 +0000 @@ -28,7 +28,7 @@ let opts = CleanOptions { config, spec: values(args, "package"), - target: args.target(), + targets: args.targets(), requested_profile: args.get_profile_name(config, "dev", ProfileChecking::Checked)?, profile_specified: args.is_present("profile") || args.is_present("release"), doc: args.is_present("doc"), diff -Nru cargo-0.44.1/src/bin/cargo/commands/clippy.rs cargo-0.47.0/src/bin/cargo/commands/clippy.rs --- cargo-0.44.1/src/bin/cargo/commands/clippy.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/clippy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,85 +0,0 @@ -use crate::command_prelude::*; - -use cargo::ops; -use cargo::util; - -pub fn cli() -> App { - subcommand("clippy-preview") - .about("Checks a package to catch common mistakes and improve your Rust code.") - .arg(Arg::with_name("args").multiple(true)) - .arg_package_spec( - "Package(s) to check", - "Check all packages in the workspace", - "Exclude packages from the check", - ) - .arg_jobs() - .arg_targets_all( - "Check only this package's library", - "Check only the specified binary", - "Check all binaries", - "Check only the specified example", - "Check all examples", - "Check only the specified test target", - "Check all tests", - "Check only the specified bench target", - "Check all benches", - "Check all targets", - ) - .arg_release("Check artifacts in release mode, with optimizations") - .arg_profile("Check artifacts with the specified profile") - .arg_features() - .arg_target_triple("Check for the target triple") - .arg_target_dir() - .arg_manifest_path() - .arg_message_format() - .after_help( - "\ -If the `--package` argument is given, then SPEC is a package ID specification -which indicates which package should be built. If it is not given, then the -current package is built. For more information on SPEC and its format, see the -`cargo help pkgid` command. - -All packages in the workspace are checked if the `--workspace` flag is supplied. The -`--workspace` flag is automatically assumed for a virtual manifest. -Note that `--exclude` has to be specified in conjunction with the `--workspace` flag. 
- -To allow or deny a lint from the command line you can use `cargo clippy --` -with: - - -W --warn OPT Set lint warnings - -A --allow OPT Set lint allowed - -D --deny OPT Set lint denied - -F --forbid OPT Set lint forbidden - -You can use tool lints to allow or deny lints from your code, eg.: - - #[allow(clippy::needless_lifetimes)] -", - ) -} - -pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { - let ws = args.workspace(config)?; - - let mode = CompileMode::Check { test: false }; - let mut compile_opts = - args.compile_options(config, mode, Some(&ws), ProfileChecking::Checked)?; - - if !config.cli_unstable().unstable_options { - return Err(anyhow::format_err!( - "`clippy-preview` is unstable, pass `-Z unstable-options` to enable it" - ) - .into()); - } - - let mut wrapper = util::process(util::config::clippy_driver()); - - if let Some(clippy_args) = args.values_of("args") { - wrapper.args(&clippy_args.collect::>()); - } - - compile_opts.build_config.primary_unit_rustc = Some(wrapper); - - ops::compile(&ws, &compile_opts)?; - Ok(()) -} diff -Nru cargo-0.44.1/src/bin/cargo/commands/doc.rs cargo-0.47.0/src/bin/cargo/commands/doc.rs --- cargo-0.44.1/src/bin/cargo/commands/doc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/doc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -30,6 +30,7 @@ .arg_target_dir() .arg_manifest_path() .arg_message_format() + .arg_unit_graph() .after_help( "\ By default the documentation for the local package and all dependencies is diff -Nru cargo-0.44.1/src/bin/cargo/commands/fetch.rs cargo-0.47.0/src/bin/cargo/commands/fetch.rs --- cargo-0.44.1/src/bin/cargo/commands/fetch.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/fetch.rs 2020-07-17 20:39:39.000000000 +0000 @@ -28,7 +28,7 @@ let opts = FetchOptions { config, - target: args.target(), + targets: args.targets(), }; let _ = ops::fetch(&ws, &opts)?; Ok(()) diff -Nru cargo-0.44.1/src/bin/cargo/commands/fix.rs cargo-0.47.0/src/bin/cargo/commands/fix.rs --- cargo-0.44.1/src/bin/cargo/commands/fix.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/fix.rs 2020-07-17 20:39:39.000000000 +0000 @@ -72,15 +72,6 @@ .long("allow-staged") .help("Fix code even if the working directory has staged changes"), ) - .arg( - Arg::with_name("clippy") - .long("clippy") - .help("Get fix suggestions from clippy instead of rustc") - .hidden(true) - .multiple(true) - .min_values(0) - .number_of_values(1), - ) .after_help( "\ This Cargo subcommand will automatically take rustc's suggestions from @@ -134,21 +125,6 @@ // code as we can. let mut opts = args.compile_options(config, mode, Some(&ws), ProfileChecking::Unchecked)?; - let use_clippy = args.is_present("clippy"); - - let clippy_args = args - .value_of("clippy") - .map(|s| s.split(' ').map(|s| s.to_string()).collect()) - .or_else(|| Some(vec![])) - .filter(|_| use_clippy); - - if use_clippy && !config.cli_unstable().unstable_options { - return Err(anyhow::format_err!( - "`cargo fix --clippy` is unstable, pass `-Z unstable-options` to enable it" - ) - .into()); - } - if let CompileFilter::Default { .. 
} = opts.filter { opts.filter = CompileFilter::Only { all_targets: true, @@ -171,7 +147,6 @@ allow_no_vcs: args.is_present("allow-no-vcs"), allow_staged: args.is_present("allow-staged"), broken_code: args.is_present("broken-code"), - clippy_args, }, )?; Ok(()) diff -Nru cargo-0.44.1/src/bin/cargo/commands/git_checkout.rs cargo-0.47.0/src/bin/cargo/commands/git_checkout.rs --- cargo-0.44.1/src/bin/cargo/commands/git_checkout.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/git_checkout.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,37 +1,14 @@ use crate::command_prelude::*; -use cargo::core::{GitReference, Source, SourceId}; -use cargo::sources::GitSource; -use cargo::util::IntoUrl; +const REMOVED: &str = "The `git-checkout` subcommand has been removed."; pub fn cli() -> App { subcommand("git-checkout") - .about("Checkout a copy of a Git repository") - .arg(opt("quiet", "No output printed to stdout").short("q")) - .arg( - Arg::with_name("url") - .long("url") - .value_name("URL") - .required(true), - ) - .arg( - Arg::with_name("reference") - .long("reference") - .value_name("REF") - .required(true), - ) + .about("This subcommand has been removed") + .settings(&[AppSettings::Hidden]) + .help(REMOVED) } -pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { - let url = args.value_of("url").unwrap().into_url()?; - let reference = args.value_of("reference").unwrap(); - - let reference = GitReference::Branch(reference.to_string()); - let source_id = SourceId::for_git(&url, reference)?; - - let mut source = GitSource::new(source_id, config)?; - - source.update()?; - - Ok(()) +pub fn exec(_config: &mut Config, _args: &ArgMatches<'_>) -> CliResult { + Err(anyhow::format_err!(REMOVED).into()) } diff -Nru cargo-0.44.1/src/bin/cargo/commands/install.rs cargo-0.47.0/src/bin/cargo/commands/install.rs --- cargo-0.44.1/src/bin/cargo/commands/install.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/install.rs 2020-07-17 20:39:39.000000000 +0000 @@ -18,7 +18,7 @@ .arg( opt("git", "Git URL to install the specified crate from") .value_name("URL") - .conflicts_with_all(&["path", "registry"]), + .conflicts_with_all(&["path", "index", "registry"]), ) .arg( opt("branch", "Branch to use when installing from git") @@ -38,7 +38,7 @@ .arg( opt("path", "Filesystem path to local crate to install") .value_name("PATH") - .conflicts_with_all(&["git", "registry"]), + .conflicts_with_all(&["git", "index", "registry"]), ) .arg(opt( "list", @@ -57,12 +57,19 @@ "Install all examples", ) .arg_target_triple("Build for the target triple") + .arg_target_dir() .arg(opt("root", "Directory to install packages into").value_name("DIR")) .arg( + opt("index", "Registry index to install from") + .value_name("INDEX") + .requires("crate") + .conflicts_with_all(&["git", "path", "registry"]), + ) + .arg( opt("registry", "Registry to use") .value_name("REGISTRY") .requires("crate") - .conflicts_with_all(&["git", "path"]), + .conflicts_with_all(&["git", "path", "index"]), ) .after_help( "\ @@ -100,8 +107,6 @@ } pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { - let registry = args.registry(config)?; - if let Some(path) = args.value_of_path("path", config) { config.reload_rooted_at(path)?; } else { @@ -143,8 +148,10 @@ } else if krates.is_empty() { from_cwd = true; SourceId::for_path(config.cwd())? - } else if let Some(registry) = registry { + } else if let Some(registry) = args.registry(config)? { SourceId::alt_registry(config, ®istry)? 
+ } else if let Some(index) = args.value_of("index") { + SourceId::for_registry(&index.into_url()?)? } else { SourceId::crates_io(config)? }; @@ -156,6 +163,7 @@ ops::install_list(root, config)?; } else { ops::install( + config, root, krates, source, diff -Nru cargo-0.44.1/src/bin/cargo/commands/locate_project.rs cargo-0.47.0/src/bin/cargo/commands/locate_project.rs --- cargo-0.44.1/src/bin/cargo/commands/locate_project.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/locate_project.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,4 @@ use crate::command_prelude::*; - -use cargo::print_json; use serde::Serialize; pub fn cli() -> App { @@ -30,6 +28,6 @@ let location = ProjectLocation { root }; - print_json(&location); + config.shell().print_json(&location); Ok(()) } diff -Nru cargo-0.44.1/src/bin/cargo/commands/metadata.rs cargo-0.47.0/src/bin/cargo/commands/metadata.rs --- cargo-0.44.1/src/bin/cargo/commands/metadata.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/metadata.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,5 @@ use crate::command_prelude::*; - use cargo::ops::{self, OutputMetadataOptions}; -use cargo::print_json; pub fn cli() -> App { subcommand("metadata") @@ -12,13 +10,11 @@ ) .arg(opt("quiet", "No output printed to stdout").short("q")) .arg_features() - .arg( - opt( - "filter-platform", - "Only include resolve dependencies matching the given target-triple", - ) - .value_name("TRIPLE"), - ) + .arg(multi_opt( + "filter-platform", + "TRIPLE", + "Only include resolve dependencies matching the given target-triple", + )) .arg(opt( "no-deps", "Output information only about the workspace members \ @@ -51,11 +47,11 @@ all_features: args.is_present("all-features"), no_default_features: args.is_present("no-default-features"), no_deps: args.is_present("no-deps"), - filter_platform: args.value_of("filter-platform").map(|s| s.to_string()), + filter_platforms: args._values_of("filter-platform"), version, }; let result = ops::output_metadata(&ws, &options)?; - print_json(&result); + config.shell().print_json(&result); Ok(()) } diff -Nru cargo-0.44.1/src/bin/cargo/commands/mod.rs cargo-0.47.0/src/bin/cargo/commands/mod.rs --- cargo-0.44.1/src/bin/cargo/commands/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -6,7 +6,6 @@ build::cli(), check::cli(), clean::cli(), - clippy::cli(), doc::cli(), fetch::cli(), fix::cli(), @@ -28,6 +27,7 @@ rustdoc::cli(), search::cli(), test::cli(), + tree::cli(), uninstall::cli(), update::cli(), vendor::cli(), @@ -43,7 +43,6 @@ "build" => build::exec, "check" => check::exec, "clean" => clean::exec, - "clippy-preview" => clippy::exec, "doc" => doc::exec, "fetch" => fetch::exec, "fix" => fix::exec, @@ -65,6 +64,7 @@ "rustdoc" => rustdoc::exec, "search" => search::exec, "test" => test::exec, + "tree" => tree::exec, "uninstall" => uninstall::exec, "update" => update::exec, "vendor" => vendor::exec, @@ -80,7 +80,6 @@ pub mod build; pub mod check; pub mod clean; -pub mod clippy; pub mod doc; pub mod fetch; pub mod fix; @@ -102,6 +101,7 @@ pub mod rustdoc; pub mod search; pub mod test; +pub mod tree; pub mod uninstall; pub mod update; pub mod vendor; diff -Nru cargo-0.44.1/src/bin/cargo/commands/package.rs cargo-0.47.0/src/bin/cargo/commands/package.rs --- cargo-0.44.1/src/bin/cargo/commands/package.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/package.rs 2020-07-17 20:39:39.000000000 +0000 @@ 
-42,7 +42,7 @@ list: args.is_present("list"), check_metadata: !args.is_present("no-metadata"), allow_dirty: args.is_present("allow-dirty"), - target: args.target(), + targets: args.targets(), jobs: args.jobs()?, features: args._values_of("features"), all_features: args.is_present("all-features"), diff -Nru cargo-0.44.1/src/bin/cargo/commands/pkgid.rs cargo-0.47.0/src/bin/cargo/commands/pkgid.rs --- cargo-0.44.1/src/bin/cargo/commands/pkgid.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/pkgid.rs 2020-07-17 20:39:39.000000000 +0000 @@ -37,6 +37,6 @@ let ws = args.workspace(config)?; let spec = args.value_of("spec").or_else(|| args.value_of("package")); let spec = ops::pkgid(&ws, spec)?; - println!("{}", spec); + cargo::drop_println!(config, "{}", spec); Ok(()) } diff -Nru cargo-0.44.1/src/bin/cargo/commands/publish.rs cargo-0.47.0/src/bin/cargo/commands/publish.rs --- cargo-0.44.1/src/bin/cargo/commands/publish.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/publish.rs 2020-07-17 20:39:39.000000000 +0000 @@ -40,7 +40,7 @@ index, verify: !args.is_present("no-verify"), allow_dirty: args.is_present("allow-dirty"), - target: args.target(), + targets: args.targets(), jobs: args.jobs()?, dry_run: args.is_present("dry-run"), registry, diff -Nru cargo-0.44.1/src/bin/cargo/commands/read_manifest.rs cargo-0.47.0/src/bin/cargo/commands/read_manifest.rs --- cargo-0.44.1/src/bin/cargo/commands/read_manifest.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/read_manifest.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,5 @@ use crate::command_prelude::*; -use cargo::print_json; - pub fn cli() -> App { subcommand("read-manifest") .about( @@ -17,6 +15,6 @@ pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { let ws = args.workspace(config)?; - print_json(&ws.current()?); + config.shell().print_json(&ws.current()?); Ok(()) } diff -Nru cargo-0.44.1/src/bin/cargo/commands/run.rs cargo-0.47.0/src/bin/cargo/commands/run.rs --- cargo-0.44.1/src/bin/cargo/commands/run.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/run.rs 2020-07-17 20:39:39.000000000 +0000 @@ -24,6 +24,7 @@ .arg_target_dir() .arg_manifest_path() .arg_message_format() + .arg_unit_graph() .after_help( "\ If neither `--bin` nor `--example` are given, then if the package only has one diff -Nru cargo-0.44.1/src/bin/cargo/commands/rustc.rs cargo-0.47.0/src/bin/cargo/commands/rustc.rs --- cargo-0.44.1/src/bin/cargo/commands/rustc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/rustc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -29,6 +29,7 @@ .arg_target_dir() .arg_manifest_path() .arg_message_format() + .arg_unit_graph() .after_help( "\ The specified target for the current package (or package specified by SPEC if diff -Nru cargo-0.44.1/src/bin/cargo/commands/rustdoc.rs cargo-0.47.0/src/bin/cargo/commands/rustdoc.rs --- cargo-0.44.1/src/bin/cargo/commands/rustdoc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/rustdoc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -33,6 +33,7 @@ .arg_target_dir() .arg_manifest_path() .arg_message_format() + .arg_unit_graph() .after_help( "\ The specified target for the current package (or package specified by SPEC if diff -Nru cargo-0.44.1/src/bin/cargo/commands/test.rs cargo-0.47.0/src/bin/cargo/commands/test.rs --- cargo-0.44.1/src/bin/cargo/commands/test.rs 2020-05-04 02:09:09.000000000 +0000 +++ 
cargo-0.47.0/src/bin/cargo/commands/test.rs 2020-07-17 20:39:39.000000000 +0000 @@ -54,6 +54,7 @@ .arg_target_dir() .arg_manifest_path() .arg_message_format() + .arg_unit_graph() .after_help( "\ The test filtering argument TESTNAME and all the arguments following the diff -Nru cargo-0.44.1/src/bin/cargo/commands/tree.rs cargo-0.47.0/src/bin/cargo/commands/tree.rs --- cargo-0.44.1/src/bin/cargo/commands/tree.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/tree.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,269 @@ +use crate::cli; +use crate::command_prelude::*; +use anyhow::{bail, format_err}; +use cargo::core::dependency::DepKind; +use cargo::ops::tree::{self, EdgeKind}; +use cargo::ops::Packages; +use cargo::util::CargoResult; +use std::collections::HashSet; +use std::str::FromStr; + +pub fn cli() -> App { + subcommand("tree") + .about("Display a tree visualization of a dependency graph") + .arg(opt("quiet", "Suppress status messages").short("q")) + .arg_manifest_path() + .arg_package_spec_no_all( + "Package to be used as the root of the tree", + "Display the tree for all packages in the workspace", + "Exclude specific workspace members", + ) + .arg(Arg::with_name("all").long("all").short("a").hidden(true)) + .arg( + Arg::with_name("all-targets") + .long("all-targets") + .hidden(true), + ) + .arg_features() + .arg_target_triple( + "Filter dependencies matching the given target-triple (default host platform)", + ) + .arg( + Arg::with_name("no-dev-dependencies") + .long("no-dev-dependencies") + .hidden(true), + ) + .arg( + multi_opt( + "edges", + "KINDS", + "The kinds of dependencies to display \ + (features, normal, build, dev, all, no-dev, no-build, no-normal)", + ) + .short("e"), + ) + .arg( + optional_multi_opt( + "invert", + "SPEC", + "Invert the tree direction and focus on the given package", + ) + .short("i"), + ) + .arg(Arg::with_name("no-indent").long("no-indent").hidden(true)) + .arg( + Arg::with_name("prefix-depth") + .long("prefix-depth") + .hidden(true), + ) + .arg( + opt( + "prefix", + "Change the prefix (indentation) of how each entry is displayed", + ) + .value_name("PREFIX") + .possible_values(&["depth", "indent", "none"]) + .default_value("indent"), + ) + .arg(opt( + "no-dedupe", + "Do not de-duplicate (repeats all shared dependencies)", + )) + .arg( + opt( + "duplicates", + "Show only dependencies which come in multiple versions (implies -i)", + ) + .short("d") + .alias("duplicate"), + ) + .arg( + opt("charset", "Character set to use in output: utf8, ascii") + .value_name("CHARSET") + .possible_values(&["utf8", "ascii"]) + .default_value("utf8"), + ) + .arg( + opt("format", "Format string used for printing dependencies") + .value_name("FORMAT") + .short("f") + .default_value("{p}"), + ) + .arg( + // Backwards compatibility with old cargo-tree. 
+ Arg::with_name("version") + .long("version") + .short("V") + .hidden(true), + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + if args.is_present("version") { + let verbose = args.occurrences_of("verbose") > 0; + let version = cli::get_version_string(verbose); + cargo::drop_print!(config, "{}", version); + return Ok(()); + } + let prefix = if args.is_present("no-indent") { + config + .shell() + .warn("the --no-indent flag has been changed to --prefix=none")?; + "none" + } else if args.is_present("prefix-depth") { + config + .shell() + .warn("the --prefix-depth flag has been changed to --prefix=depth")?; + "depth" + } else { + args.value_of("prefix").unwrap() + }; + let prefix = tree::Prefix::from_str(prefix).map_err(|e| anyhow::anyhow!("{}", e))?; + + let no_dedupe = args.is_present("no-dedupe") || args.is_present("all"); + if args.is_present("all") { + config.shell().warn( + "The `cargo tree` --all flag has been changed to --no-dedupe, \ + and may be removed in a future version.\n\ + If you are looking to display all workspace members, use the --workspace flag.", + )?; + } + + let targets = if args.is_present("all-targets") { + config + .shell() + .warn("the --all-targets flag has been changed to --target=all")?; + vec!["all".to_string()] + } else { + args._values_of("target") + }; + let target = tree::Target::from_cli(targets); + + let edge_kinds = parse_edge_kinds(config, args)?; + let graph_features = edge_kinds.contains(&EdgeKind::Feature); + + let packages = args.packages_from_flags()?; + let mut invert = args + .values_of("invert") + .map_or_else(|| Vec::new(), |is| is.map(|s| s.to_string()).collect()); + if args.is_present_with_zero_values("invert") { + match &packages { + Packages::Packages(ps) => { + // Backwards compatibility with old syntax of `cargo tree -i -p foo`. + invert.extend(ps.clone()); + } + _ => { + return Err(format_err!( + "The `-i` flag requires a package name.\n\ +\n\ +The `-i` flag is used to inspect the reverse dependencies of a specific\n\ +package. It will invert the tree and display the packages that depend on the\n\ +given package.\n\ +\n\ +Note that in a workspace, by default it will only display the package's\n\ +reverse dependencies inside the tree of the workspace member in the current\n\ +directory. The --workspace flag can be used to extend it so that it will show\n\ +the package's reverse dependencies across the entire workspace. 
The -p flag\n\ +can be used to display the package's reverse dependencies only with the\n\ +subtree of the package given to -p.\n\ +" + ) + .into()); + } + } + } + + let ws = args.workspace(config)?; + let charset = tree::Charset::from_str(args.value_of("charset").unwrap()) + .map_err(|e| anyhow::anyhow!("{}", e))?; + let opts = tree::TreeOptions { + features: values(args, "features"), + all_features: args.is_present("all-features"), + no_default_features: args.is_present("no-default-features"), + packages, + target, + edge_kinds, + invert, + prefix, + no_dedupe, + duplicates: args.is_present("duplicates"), + charset, + format: args.value_of("format").unwrap().to_string(), + graph_features, + }; + + tree::build_and_print(&ws, &opts)?; + Ok(()) +} + +fn parse_edge_kinds(config: &Config, args: &ArgMatches<'_>) -> CargoResult> { + let mut kinds: Vec<&str> = args + .values_of("edges") + .map_or_else(|| Vec::new(), |es| es.flat_map(|e| e.split(',')).collect()); + if args.is_present("no-dev-dependencies") { + config + .shell() + .warn("the --no-dev-dependencies flag has changed to -e=no-dev")?; + kinds.push("no-dev"); + } + if kinds.is_empty() { + kinds.extend(&["normal", "build", "dev"]); + } + + let mut result = HashSet::new(); + let insert_defaults = |result: &mut HashSet| { + result.insert(EdgeKind::Dep(DepKind::Normal)); + result.insert(EdgeKind::Dep(DepKind::Build)); + result.insert(EdgeKind::Dep(DepKind::Development)); + }; + let unknown = |k| { + bail!( + "unknown edge kind `{}`, valid values are \ + \"normal\", \"build\", \"dev\", \ + \"no-normal\", \"no-build\", \"no-dev\", \ + \"features\", or \"all\"", + k + ) + }; + if kinds.iter().any(|k| k.starts_with("no-")) { + insert_defaults(&mut result); + for kind in &kinds { + match *kind { + "no-normal" => result.remove(&EdgeKind::Dep(DepKind::Normal)), + "no-build" => result.remove(&EdgeKind::Dep(DepKind::Build)), + "no-dev" => result.remove(&EdgeKind::Dep(DepKind::Development)), + "features" => result.insert(EdgeKind::Feature), + "normal" | "build" | "dev" | "all" => { + bail!("`no-` dependency kinds cannot be mixed with other dependency kinds") + } + k => return unknown(k), + }; + } + return Ok(result); + } + for kind in &kinds { + match *kind { + "all" => { + insert_defaults(&mut result); + result.insert(EdgeKind::Feature); + } + "features" => { + result.insert(EdgeKind::Feature); + } + "normal" => { + result.insert(EdgeKind::Dep(DepKind::Normal)); + } + "build" => { + result.insert(EdgeKind::Dep(DepKind::Build)); + } + "dev" => { + result.insert(EdgeKind::Dep(DepKind::Development)); + } + k => return unknown(k), + } + } + if kinds.len() == 1 && kinds[0] == "features" { + insert_defaults(&mut result); + } + Ok(result) +} diff -Nru cargo-0.44.1/src/bin/cargo/commands/verify_project.rs cargo-0.47.0/src/bin/cargo/commands/verify_project.rs --- cargo-0.44.1/src/bin/cargo/commands/verify_project.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/verify_project.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,8 +3,6 @@ use std::collections::HashMap; use std::process; -use cargo::print_json; - pub fn cli() -> App { subcommand("verify-project") .about("Check correctness of crate manifest") @@ -13,19 +11,15 @@ } pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { - fn fail(reason: &str, value: &str) -> ! 
{ + if let Err(e) = args.workspace(config) { let mut h = HashMap::new(); - h.insert(reason.to_string(), value.to_string()); - print_json(&h); + h.insert("invalid".to_string(), e.to_string()); + config.shell().print_json(&h); process::exit(1) } - if let Err(e) = args.workspace(config) { - fail("invalid", &e.to_string()) - } - let mut h = HashMap::new(); h.insert("success".to_string(), "true".to_string()); - print_json(&h); + config.shell().print_json(&h); Ok(()) } diff -Nru cargo-0.44.1/src/bin/cargo/commands/version.rs cargo-0.47.0/src/bin/cargo/commands/version.rs --- cargo-0.44.1/src/bin/cargo/commands/version.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/commands/version.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,5 @@ -use crate::command_prelude::*; - use crate::cli; +use crate::command_prelude::*; pub fn cli() -> App { subcommand("version") @@ -8,9 +7,9 @@ .arg(opt("quiet", "No output printed to stdout").short("q")) } -pub fn exec(_config: &mut Config, args: &ArgMatches<'_>) -> CliResult { +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { let verbose = args.occurrences_of("verbose") > 0; let version = cli::get_version_string(verbose); - print!("{}", version); + cargo::drop_print!(config, "{}", version); Ok(()) } diff -Nru cargo-0.44.1/src/bin/cargo/main.rs cargo-0.47.0/src/bin/cargo/main.rs --- cargo-0.44.1/src/bin/cargo/main.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/bin/cargo/main.rs 2020-07-17 20:39:39.000000000 +0000 @@ -165,9 +165,7 @@ } #[cfg(windows)] fn is_executable>(path: P) -> bool { - fs::metadata(path) - .map(|metadata| metadata.is_file()) - .unwrap_or(false) + path.as_ref().is_file() } fn search_directories(config: &Config) -> Vec { diff -Nru cargo-0.44.1/src/cargo/core/compiler/build_config.rs cargo-0.47.0/src/cargo/core/compiler/build_config.rs --- cargo-0.44.1/src/cargo/core/compiler/build_config.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/build_config.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,15 +1,17 @@ -use crate::core::compiler::{CompileKind, CompileTarget}; -use crate::core::interning::InternedString; +use crate::core::compiler::CompileKind; +use crate::util::interning::InternedString; use crate::util::ProcessBuilder; use crate::util::{CargoResult, Config, RustfixDiagnosticServer}; +use anyhow::bail; use serde::ser; use std::cell::RefCell; +use std::path::PathBuf; /// Configuration information for a rustc build. #[derive(Debug)] pub struct BuildConfig { /// The requested kind of compilation for this session - pub requested_kind: CompileKind, + pub requested_kinds: Vec, /// Number of rustc jobs to run in parallel. pub jobs: u32, /// Build profile @@ -22,9 +24,19 @@ pub force_rebuild: bool, /// Output a build plan to stdout instead of actually compiling. pub build_plan: bool, - /// An optional override of the rustc path for primary units only + /// Output the unit graph to stdout instead of actually compiling. + pub unit_graph: bool, + /// An optional override of the rustc process for primary units pub primary_unit_rustc: Option, + /// A thread used by `cargo fix` to receive messages on a socket regarding + /// the success/failure of applying fixes. pub rustfix_diagnostic_server: RefCell>, + /// The directory to copy final artifacts to. Note that even if `out_dir` is + /// set, a copy of artifacts still could be found a `target/(debug\release)` + /// as usual. 
+ // Note that, although the cmd-line flag name is `out-dir`, in code we use + // `export_dir`, to avoid confusion with out dir at `target/debug/deps`. + pub export_dir: Option, } impl BuildConfig { @@ -39,26 +51,11 @@ pub fn new( config: &Config, jobs: Option, - requested_target: &Option, + requested_targets: &[String], mode: CompileMode, ) -> CargoResult { let cfg = config.build_config()?; - let requested_kind = match requested_target { - Some(s) => CompileKind::Target(CompileTarget::new(s)?), - None => match &cfg.target { - Some(val) => { - let value = if val.raw_value().ends_with(".json") { - let path = val.clone().resolve_path(config); - path.to_str().expect("must be utf-8 in toml").to_string() - } else { - val.raw_value().to_string() - }; - CompileKind::Target(CompileTarget::new(&value)?) - } - None => CompileKind::Host, - }, - }; - + let requested_kinds = CompileKind::from_requested_targets(config, requested_targets)?; if jobs == Some(0) { anyhow::bail!("jobs must be at least 1") } @@ -72,15 +69,17 @@ let jobs = jobs.or(cfg.jobs).unwrap_or(::num_cpus::get() as u32); Ok(BuildConfig { - requested_kind, + requested_kinds, jobs, requested_profile: InternedString::new("dev"), mode, message_format: MessageFormat::Human, force_rebuild: false, build_plan: false, + unit_graph: false, primary_unit_rustc: None, rustfix_diagnostic_server: RefCell::new(None), + export_dir: None, }) } @@ -96,6 +95,13 @@ pub fn test(&self) -> bool { self.mode == CompileMode::Test || self.mode == CompileMode::Bench } + + pub fn single_requested_kind(&self) -> CargoResult { + match self.requested_kinds.len() { + 1 => Ok(self.requested_kinds[0]), + _ => bail!("only one `--target` argument is supported"), + } + } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] diff -Nru cargo-0.44.1/src/cargo/core/compiler/build_context/mod.rs cargo-0.47.0/src/cargo/core/compiler/build_context/mod.rs --- cargo-0.44.1/src/cargo/core/compiler/build_context/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/build_context/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,17 +1,17 @@ -use crate::core::compiler::unit::UnitInterner; -use crate::core::compiler::{BuildConfig, BuildOutput, CompileKind, Unit}; +use crate::core::compiler::unit_graph::UnitGraph; +use crate::core::compiler::{BuildConfig, CompileKind, Unit}; use crate::core::profiles::Profiles; -use crate::core::{InternedString, Workspace}; -use crate::core::{PackageId, PackageSet}; +use crate::core::PackageSet; +use crate::core::Workspace; use crate::util::config::Config; use crate::util::errors::CargoResult; +use crate::util::interning::InternedString; use crate::util::Rustc; use std::collections::HashMap; use std::path::PathBuf; -use std::str; mod target_info; -pub use self::target_info::{FileFlavor, RustcTargetData, TargetInfo}; +pub use self::target_info::{FileFlavor, FileType, RustcTargetData, TargetInfo}; /// The build context, containing all information about a build task. /// @@ -27,37 +27,40 @@ pub profiles: Profiles, pub build_config: &'a BuildConfig, /// Extra compiler args for either `rustc` or `rustdoc`. - pub extra_compiler_args: HashMap, Vec>, + pub extra_compiler_args: HashMap>, /// Package downloader. - pub packages: &'a PackageSet<'cfg>, - - /// Source of interning new units as they're created. - pub units: &'a UnitInterner<'a>, - + /// + /// This holds ownership of the `Package` objects. + pub packages: PackageSet<'cfg>, /// Information about rustc and the target platform. 
pub target_data: RustcTargetData, + /// The root units of `unit_graph` (units requested on the command-line). + pub roots: Vec, + /// The dependency graph of units to compile. + pub unit_graph: UnitGraph, } impl<'a, 'cfg> BuildContext<'a, 'cfg> { pub fn new( ws: &'a Workspace<'cfg>, - packages: &'a PackageSet<'cfg>, - config: &'cfg Config, + packages: PackageSet<'cfg>, build_config: &'a BuildConfig, profiles: Profiles, - units: &'a UnitInterner<'a>, - extra_compiler_args: HashMap, Vec>, + extra_compiler_args: HashMap>, target_data: RustcTargetData, + roots: Vec, + unit_graph: UnitGraph, ) -> CargoResult> { Ok(BuildContext { ws, + config: ws.config(), packages, - config, build_config, profiles, extra_compiler_args, - units, target_data, + roots, + unit_graph, }) } @@ -89,30 +92,15 @@ self.build_config.jobs } - pub fn rustflags_args(&self, unit: &Unit<'_>) -> &[String] { + pub fn rustflags_args(&self, unit: &Unit) -> &[String] { &self.target_data.info(unit.kind).rustflags } - pub fn rustdocflags_args(&self, unit: &Unit<'_>) -> &[String] { + pub fn rustdocflags_args(&self, unit: &Unit) -> &[String] { &self.target_data.info(unit.kind).rustdocflags } - pub fn show_warnings(&self, pkg: PackageId) -> bool { - pkg.source_id().is_path() || self.config.extra_verbose() - } - - pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec> { + pub fn extra_args_for(&self, unit: &Unit) -> Option<&Vec> { self.extra_compiler_args.get(unit) } - - /// If a build script is overridden, this returns the `BuildOutput` to use. - /// - /// `lib_name` is the `links` library name and `kind` is whether it is for - /// Host or Target. - pub fn script_override(&self, lib_name: &str, kind: CompileKind) -> Option<&BuildOutput> { - self.target_data - .target_config(kind) - .links_overrides - .get(lib_name) - } } diff -Nru cargo-0.44.1/src/cargo/core/compiler/build_context/target_info.rs cargo-0.47.0/src/cargo/core/compiler/build_context/target_info.rs --- cargo-0.44.1/src/cargo/core/compiler/build_context/target_info.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/build_context/target_info.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,5 @@ -use crate::core::compiler::CompileKind; -use crate::core::compiler::CompileTarget; -use crate::core::{Dependency, TargetKind, Workspace}; +use crate::core::compiler::{BuildOutput, CompileKind, CompileMode, CompileTarget, CrateType}; +use crate::core::{Dependency, Target, TargetKind, Workspace}; use crate::util::config::{Config, StringList, TargetConfig}; use crate::util::{CargoResult, CargoResultExt, ProcessBuilder, Rustc}; use cargo_platform::{Cfg, CfgExpr}; @@ -26,7 +25,7 @@ /// `Some((prefix, suffix))`, for example `libcargo.so` would be /// `Some(("lib", ".so")). The value is `None` if the crate type is not /// supported. - crate_types: RefCell>>, + crate_types: RefCell>>, /// `cfg` information extracted from `rustc --print=cfg`. cfg: Vec, /// Path to the sysroot. @@ -41,6 +40,8 @@ pub rustflags: Vec, /// Extra flags to pass to `rustdoc`, see `env_args`. pub rustdocflags: Vec, + /// Remove this when it hits stable (1.45) + pub supports_embed_bitcode: Option, } /// Kind of each file generated by a Unit, part of `FileType`. @@ -48,52 +49,91 @@ pub enum FileFlavor { /// Not a special file type. Normal, - /// Like `Normal`, but not directly executable + /// Like `Normal`, but not directly executable. + /// For example, a `.wasm` file paired with the "normal" `.js` file. Auxiliary, /// Something you can link against (e.g., a library). 
- Linkable { rmeta: bool }, + Linkable, + /// An `.rmeta` Rust metadata file. + Rmeta, /// Piece of external debug information (e.g., `.dSYM`/`.pdb` file). DebugInfo, } /// Type of each file generated by a Unit. +#[derive(Debug)] pub struct FileType { /// The kind of file. pub flavor: FileFlavor, + /// The crate-type that generates this file. + /// + /// `None` for things that aren't associated with a specific crate type, + /// for example `rmeta` files. + pub crate_type: Option, /// The suffix for the file (for example, `.rlib`). /// This is an empty string for executables on Unix-like platforms. suffix: String, /// The prefix for the file (for example, `lib`). /// This is an empty string for things like executables. prefix: String, - /// Flag to convert hyphen to underscore. - /// - /// wasm bin targets will generate two files in deps such as - /// "web-stuff.js" and "web_stuff.wasm". Note the different usages of "-" - /// and "_". This flag indicates that the stem "web-stuff" should be - /// converted to "web_stuff". + /// Flag to convert hyphen to underscore when uplifting. should_replace_hyphens: bool, } impl FileType { - pub fn filename(&self, stem: &str) -> String { - let stem = if self.should_replace_hyphens { - stem.replace("-", "_") + /// The filename for this FileType crated by rustc. + pub fn output_filename(&self, target: &Target, metadata: Option<&str>) -> String { + match metadata { + Some(metadata) => format!( + "{}{}-{}{}", + self.prefix, + target.crate_name(), + metadata, + self.suffix + ), + None => format!("{}{}{}", self.prefix, target.crate_name(), self.suffix), + } + } + + /// The filename for this FileType that Cargo should use when "uplifting" + /// it to the destination directory. + pub fn uplift_filename(&self, target: &Target) -> String { + let name = if self.should_replace_hyphens { + target.crate_name() } else { - stem.to_string() + target.name().to_string() }; - format!("{}{}{}", self.prefix, stem, self.suffix) + format!("{}{}{}", self.prefix, name, self.suffix) + } + + /// Creates a new instance representing a `.rmeta` file. + pub fn new_rmeta() -> FileType { + // Note that even binaries use the `lib` prefix. 
+ FileType { + flavor: FileFlavor::Rmeta, + crate_type: None, + suffix: ".rmeta".to_string(), + prefix: "lib".to_string(), + should_replace_hyphens: true, + } } } impl TargetInfo { pub fn new( config: &Config, - requested_kind: CompileKind, + requested_kinds: &[CompileKind], rustc: &Rustc, kind: CompileKind, ) -> CargoResult { - let rustflags = env_args(config, requested_kind, &rustc.host, None, kind, "RUSTFLAGS")?; + let rustflags = env_args( + config, + requested_kinds, + &rustc.host, + None, + kind, + "RUSTFLAGS", + )?; let mut process = rustc.process(); process .arg("-") @@ -103,15 +143,28 @@ .args(&rustflags) .env_remove("RUSTC_LOG"); + let mut embed_bitcode_test = process.clone(); + embed_bitcode_test.arg("-Cembed-bitcode"); + let supports_embed_bitcode = match kind { + CompileKind::Host => Some(rustc.cached_output(&embed_bitcode_test).is_ok()), + _ => None, + }; + if let CompileKind::Target(target) = kind { process.arg("--target").arg(target.rustc_target()); } let crate_type_process = process.clone(); - const KNOWN_CRATE_TYPES: &[&str] = - &["bin", "rlib", "dylib", "cdylib", "staticlib", "proc-macro"]; + const KNOWN_CRATE_TYPES: &[CrateType] = &[ + CrateType::Bin, + CrateType::Rlib, + CrateType::Dylib, + CrateType::Cdylib, + CrateType::Staticlib, + CrateType::ProcMacro, + ]; for crate_type in KNOWN_CRATE_TYPES.iter() { - process.arg("--crate-type").arg(crate_type); + process.arg("--crate-type").arg(crate_type.as_str()); } process.arg("--print=sysroot"); @@ -125,7 +178,7 @@ let mut map = HashMap::new(); for crate_type in KNOWN_CRATE_TYPES { let out = parse_crate_type(crate_type, &process, &output, &error, &mut lines)?; - map.insert(crate_type.to_string(), out); + map.insert(crate_type.clone(), out); } let line = match lines.next() { @@ -172,7 +225,7 @@ // information rustflags: env_args( config, - requested_kind, + requested_kinds, &rustc.host, Some(&cfg), kind, @@ -180,13 +233,14 @@ )?, rustdocflags: env_args( config, - requested_kind, + requested_kinds, &rustc.host, Some(&cfg), kind, "RUSTDOCFLAGS", )?, cfg, + supports_embed_bitcode, }) } @@ -210,15 +264,20 @@ /// Returns the list of file types generated by the given crate type. /// /// Returns `None` if the target does not support the given crate type. - pub fn file_types( + fn file_types( &self, - crate_type: &str, + crate_type: &CrateType, flavor: FileFlavor, - kind: &TargetKind, target_triple: &str, ) -> CargoResult>> { + let crate_type = if *crate_type == CrateType::Lib { + CrateType::Rlib + } else { + crate_type.clone() + }; + let mut crate_types = self.crate_types.borrow_mut(); - let entry = crate_types.entry(crate_type.to_string()); + let entry = crate_types.entry(crate_type.clone()); let crate_type_info = match entry { Entry::Occupied(o) => &*o.into_mut(), Entry::Vacant(v) => { @@ -234,47 +293,95 @@ suffix: suffix.clone(), prefix: prefix.clone(), flavor, - should_replace_hyphens: false, + crate_type: Some(crate_type.clone()), + should_replace_hyphens: crate_type != CrateType::Bin, }]; - // See rust-lang/cargo#4500. - if target_triple.ends_with("-windows-msvc") - && crate_type.ends_with("dylib") - && suffix == ".dll" - { - ret.push(FileType { - suffix: ".dll.lib".to_string(), - prefix: prefix.clone(), - flavor: FileFlavor::Normal, - should_replace_hyphens: false, - }) + // Window shared library import/export files. + if crate_type.is_dynamic() { + // Note: Custom JSON specs can alter the suffix. For now, we'll + // just ignore non-DLL suffixes. 
+ if target_triple.ends_with("-windows-msvc") && suffix == ".dll" { + // See https://docs.microsoft.com/en-us/cpp/build/reference/working-with-import-libraries-and-export-files + // for more information about DLL import/export files. + ret.push(FileType { + suffix: ".dll.lib".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::Auxiliary, + crate_type: Some(crate_type.clone()), + should_replace_hyphens: true, + }); + // NOTE: lld does not produce these + ret.push(FileType { + suffix: ".dll.exp".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::Auxiliary, + crate_type: Some(crate_type.clone()), + should_replace_hyphens: true, + }); + } else if target_triple.ends_with("windows-gnu") && suffix == ".dll" { + // See https://cygwin.com/cygwin-ug-net/dll.html for more + // information about GNU import libraries. + // LD can link DLL directly, but LLD requires the import library. + ret.push(FileType { + suffix: ".dll.a".to_string(), + prefix: "lib".to_string(), + flavor: FileFlavor::Auxiliary, + crate_type: Some(crate_type.clone()), + should_replace_hyphens: true, + }) + } } - // See rust-lang/cargo#4535. - if target_triple.starts_with("wasm32-") && crate_type == "bin" && suffix == ".js" { + if target_triple.starts_with("wasm32-") && crate_type == CrateType::Bin && suffix == ".js" { + // emscripten binaries generate a .js file, which loads a .wasm + // file. ret.push(FileType { suffix: ".wasm".to_string(), prefix: prefix.clone(), flavor: FileFlavor::Auxiliary, + crate_type: Some(crate_type.clone()), + // Name `foo-bar` will generate a `foo_bar.js` and + // `foo_bar.wasm`. Cargo will translate the underscore and + // copy `foo_bar.js` to `foo-bar.js`. However, the wasm + // filename is embedded in the .js file with an underscore, so + // it should not contain hyphens. should_replace_hyphens: true, - }) + }); + // And a map file for debugging. This is only emitted with debug=2 + // (-g4 for emcc). + ret.push(FileType { + suffix: ".wasm.map".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::DebugInfo, + crate_type: Some(crate_type.clone()), + should_replace_hyphens: true, + }); } - // See rust-lang/cargo#4490, rust-lang/cargo#4960. - // Only uplift debuginfo for binaries. - // - Tests are run directly from `target/debug/deps/` with the - // metadata hash still in the filename. - // - Examples are only uplifted for apple because the symbol file - // needs to match the executable file name to be found (i.e., it - // needs to remove the hash in the filename). On Windows, the path - // to the .pdb with the hash is embedded in the executable. + // Handle separate debug files. let is_apple = target_triple.contains("-apple-"); - if *kind == TargetKind::Bin || (*kind == TargetKind::ExampleBin && is_apple) { + if matches!( + crate_type, + CrateType::Bin | CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro + ) { if is_apple { + let suffix = if crate_type == CrateType::Bin { + ".dSYM".to_string() + } else { + ".dylib.dSYM".to_string() + }; ret.push(FileType { - suffix: ".dSYM".to_string(), + suffix, prefix: prefix.clone(), flavor: FileFlavor::DebugInfo, + crate_type: Some(crate_type), + // macOS tools like lldb use all sorts of magic to locate + // dSYM files. See https://lldb.llvm.org/use/symbols.html + // for some details. It seems like a `.dSYM` located next + // to the executable with the same name is one method. The + // dSYM should have the same hyphens as the executable for + // the names to match. 
should_replace_hyphens: false, }) } else if target_triple.ends_with("-msvc") { @@ -282,7 +389,14 @@ suffix: ".pdb".to_string(), prefix: prefix.clone(), flavor: FileFlavor::DebugInfo, - should_replace_hyphens: false, + crate_type: Some(crate_type), + // The absolute path to the pdb file is embedded in the + // executable. If the exe/pdb pair is moved to another + // machine, then debuggers will look in the same directory + // of the exe with the original pdb filename. Since the + // original name contains underscores, they need to be + // preserved. + should_replace_hyphens: true, }) } } @@ -290,10 +404,10 @@ Ok(Some(ret)) } - fn discover_crate_type(&self, crate_type: &str) -> CargoResult> { + fn discover_crate_type(&self, crate_type: &CrateType) -> CargoResult> { let mut process = self.crate_type_process.clone(); - process.arg("--crate-type").arg(crate_type); + process.arg("--crate-type").arg(crate_type.as_str()); let output = process.exec_with_output().chain_err(|| { format!( @@ -312,6 +426,62 @@ &mut output.lines(), )?) } + + /// Returns all the file types generated by rustc for the given mode/target_kind. + /// + /// The first value is a Vec of file types generated, the second value is + /// a list of CrateTypes that are not supported by the given target. + pub fn rustc_outputs( + &self, + mode: CompileMode, + target_kind: &TargetKind, + target_triple: &str, + ) -> CargoResult<(Vec, Vec)> { + match mode { + CompileMode::Build => self.calc_rustc_outputs(target_kind, target_triple), + CompileMode::Test | CompileMode::Bench => { + match self.file_types(&CrateType::Bin, FileFlavor::Normal, target_triple)? { + Some(fts) => Ok((fts, Vec::new())), + None => Ok((Vec::new(), vec![CrateType::Bin])), + } + } + CompileMode::Check { .. } => Ok((vec![FileType::new_rmeta()], Vec::new())), + CompileMode::Doc { .. } | CompileMode::Doctest | CompileMode::RunCustomBuild => { + panic!("asked for rustc output for non-rustc mode") + } + } + } + + fn calc_rustc_outputs( + &self, + target_kind: &TargetKind, + target_triple: &str, + ) -> CargoResult<(Vec, Vec)> { + let mut unsupported = Vec::new(); + let mut result = Vec::new(); + let crate_types = target_kind.rustc_crate_types(); + for crate_type in &crate_types { + let flavor = if crate_type.is_linkable() { + FileFlavor::Linkable + } else { + FileFlavor::Normal + }; + let file_types = self.file_types(crate_type, flavor, target_triple)?; + match file_types { + Some(types) => { + result.extend(types); + } + None => { + unsupported.push(crate_type.clone()); + } + } + } + if !result.is_empty() && !crate_types.iter().any(|ct| ct.requires_upstream_objects()) { + // Only add rmeta if pipelining. + result.push(FileType::new_rmeta()); + } + Ok((result, unsupported)) + } } /// Takes rustc output (using specialized command line args), and calculates the file prefix and @@ -324,7 +494,7 @@ /// This function can not handle more than one file per type (with wasm32-unknown-emscripten, there /// are two files for bin (`.wasm` and `.js`)). fn parse_crate_type( - crate_type: &str, + crate_type: &CrateType, cmd: &ProcessBuilder, output: &str, error: &str, @@ -394,7 +564,7 @@ /// scripts, ...), even if it is the same as the target. fn env_args( config: &Config, - requested_kind: CompileKind, + requested_kinds: &[CompileKind], host_triple: &str, target_cfg: Option<&[Cfg]>, kind: CompileKind, @@ -419,7 +589,7 @@ // This means that, e.g., even if the specified --target is the // same as the host, build scripts in plugins won't get // RUSTFLAGS. 
- if !requested_kind.is_host() && kind.is_host() { + if requested_kinds != [CompileKind::Host] && kind.is_host() { // This is probably a build script or plugin and we're // compiling with --target. In this scenario there are // no rustflags we can apply. @@ -496,28 +666,31 @@ host_info: TargetInfo, /// Build information for targets that we're building for. This will be - /// empty if the `--target` flag is not passed, and currently also only ever - /// has at most one entry, but eventually we'd like to support multi-target - /// builds with Cargo. + /// empty if the `--target` flag is not passed. target_config: HashMap, target_info: HashMap, } impl RustcTargetData { - pub fn new(ws: &Workspace<'_>, requested_kind: CompileKind) -> CargoResult { + pub fn new( + ws: &Workspace<'_>, + requested_kinds: &[CompileKind], + ) -> CargoResult { let config = ws.config(); let rustc = config.load_global_rustc(Some(ws))?; let host_config = config.target_cfg_triple(&rustc.host)?; - let host_info = TargetInfo::new(config, requested_kind, &rustc, CompileKind::Host)?; + let host_info = TargetInfo::new(config, requested_kinds, &rustc, CompileKind::Host)?; let mut target_config = HashMap::new(); let mut target_info = HashMap::new(); - if let CompileKind::Target(target) = requested_kind { - let tcfg = config.target_cfg_triple(target.short_name())?; - target_config.insert(target, tcfg); - target_info.insert( - target, - TargetInfo::new(config, requested_kind, &rustc, CompileKind::Target(target))?, - ); + for kind in requested_kinds { + if let CompileKind::Target(target) = *kind { + let tcfg = config.target_cfg_triple(target.short_name())?; + target_config.insert(target, tcfg); + target_info.insert( + target, + TargetInfo::new(config, requested_kinds, &rustc, *kind)?, + ); + } } Ok(RustcTargetData { @@ -571,4 +744,12 @@ CompileKind::Target(s) => &self.target_config[&s], } } + + /// If a build script is overridden, this returns the `BuildOutput` to use. + /// + /// `lib_name` is the `links` library name and `kind` is whether it is for + /// Host or Target. 
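The updated `env_args` check near the top of this hunk decides whether RUSTFLAGS reach a unit: once any `--target` is requested (even one equal to the host), host-side units such as build scripts and proc macros are skipped. A rough sketch of that predicate with stand-in types, not Cargo's `CompileKind`:

```rust
// Stand-in for CompileKind; only for illustrating the check above.
#[derive(Clone, Copy, PartialEq)]
enum Kind<'a> {
    Host,
    Target(&'a str),
}

fn rustflags_apply(requested: &[Kind<'_>], unit_kind: Kind<'_>) -> bool {
    // The old check compared a single requested kind; with multiple `--target`
    // flags the question becomes "was anything other than a plain host build
    // requested, and is this unit a host unit?".
    !(requested != [Kind::Host] && unit_kind == Kind::Host)
}

fn main() {
    let host_only = [Kind::Host];
    let cross = [Kind::Target("x86_64-unknown-linux-gnu")];
    assert!(rustflags_apply(&host_only, Kind::Host)); // plain `cargo build`
    assert!(rustflags_apply(&cross, Kind::Target("x86_64-unknown-linux-gnu")));
    assert!(!rustflags_apply(&cross, Kind::Host)); // build script while cross-compiling
}
```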
+ pub fn script_override(&self, lib_name: &str, kind: CompileKind) -> Option<&BuildOutput> { + self.target_config(kind).links_overrides.get(lib_name) + } } diff -Nru cargo-0.44.1/src/cargo/core/compiler/build_plan.rs cargo-0.47.0/src/cargo/core/compiler/build_plan.rs --- cargo-0.44.1/src/cargo/core/compiler/build_plan.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/build_plan.rs 2020-07-17 20:39:39.000000000 +0000 @@ -14,7 +14,7 @@ use super::context::OutputFile; use super::{CompileKind, CompileMode, Context, Unit}; use crate::core::TargetKind; -use crate::util::{internal, CargoResult, ProcessBuilder}; +use crate::util::{internal, CargoResult, Config, ProcessBuilder}; #[derive(Debug, Serialize)] struct Invocation { @@ -45,7 +45,7 @@ } impl Invocation { - pub fn new(unit: &Unit<'_>, deps: Vec) -> Invocation { + pub fn new(unit: &Unit, deps: Vec) -> Invocation { let id = unit.pkg.package_id(); Invocation { package_name: id.name().to_string(), @@ -109,7 +109,7 @@ } } - pub fn add<'a>(&mut self, cx: &Context<'a, '_>, unit: &Unit<'a>) -> CargoResult<()> { + pub fn add(&mut self, cx: &Context<'_, '_>, unit: &Unit) -> CargoResult<()> { let id = self.plan.invocations.len(); self.invocation_map.insert(unit.buildkey(), id); let deps = cx @@ -146,9 +146,9 @@ self.plan.inputs = inputs; } - pub fn output_plan(self) { + pub fn output_plan(self, config: &Config) { let encoded = serde_json::to_string(&self.plan).unwrap(); - println!("{}", encoded); + crate::drop_println!(config, "{}", encoded); } } diff -Nru cargo-0.44.1/src/cargo/core/compiler/compilation.rs cargo-0.47.0/src/cargo/core/compiler/compilation.rs --- cargo-0.44.1/src/cargo/core/compiler/compilation.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/compilation.rs 2020-07-17 20:39:39.000000000 +0000 @@ -8,29 +8,34 @@ use super::BuildContext; use crate::core::compiler::CompileKind; -use crate::core::{Edition, Package, PackageId, Target}; +use crate::core::compiler::Unit; +use crate::core::{Edition, Package, PackageId}; use crate::util::{self, config, join_paths, process, CargoResult, Config, ProcessBuilder}; /// Structure with enough information to run `rustdoc --test`. pub struct Doctest { - /// The package being doc-tested. - pub package: Package, - /// The target being tested (currently always the package's lib). - pub target: Target, + /// What's being doctested + pub unit: Unit, /// Arguments needed to pass to rustdoc to run this test. pub args: Vec, /// Whether or not -Zunstable-options is needed. pub unstable_opts: bool, + /// The -Clinker value to use. + pub linker: Option, } /// A structure returning the result of a compilation. pub struct Compilation<'cfg> { /// An array of all tests created during this compilation. - /// `(package, target, path_to_test_exe)` - pub tests: Vec<(Package, Target, PathBuf)>, + /// `(unit, path_to_test_exe)` where `unit` contains information such as the + /// package, compile target, etc. + pub tests: Vec<(Unit, PathBuf)>, /// An array of all binaries created. - pub binaries: Vec, + pub binaries: Vec<(Unit, PathBuf)>, + + /// An array of all cdylibs created. + pub cdylibs: Vec<(Unit, PathBuf)>, /// All directories for the output of native build commands. /// @@ -41,20 +46,17 @@ pub native_dirs: BTreeSet, /// Root output directory (for the local package's artifacts) - pub root_output: PathBuf, + pub root_output: HashMap, /// Output directory for rust dependencies. /// May be for the host or for a specific target. 
- pub deps_output: PathBuf, - - /// Output directory for the rust host dependencies. - pub host_deps_output: PathBuf, + pub deps_output: HashMap, - /// The path to rustc's own libstd - pub host_dylib_path: PathBuf, + /// The path to the host libdir for the compiler used + sysroot_host_libdir: PathBuf, - /// The path to libstd for the target - pub target_dylib_path: PathBuf, + /// The path to libstd for each target + sysroot_target_libdir: HashMap, /// Extra environment variables that were passed to compilations and should /// be passed to future invocations of programs. @@ -69,29 +71,33 @@ /// Flags to pass to rustdoc when invoked from cargo test, per package. pub rustdocflags: HashMap>, + /// The target host triple. pub host: String, - pub target: String, config: &'cfg Config, + + /// Rustc process to be used by default rustc_process: ProcessBuilder, - primary_unit_rustc_process: Option, + /// Rustc process to be used for workspace crates instead of rustc_process + rustc_workspace_wrapper_process: ProcessBuilder, + /// Optional rustc process to be used for primary crates instead of either rustc_process or + /// rustc_workspace_wrapper_process + primary_rustc_process: Option, - target_runner: Option<(PathBuf, Vec)>, + target_runners: HashMap)>>, } impl<'cfg> Compilation<'cfg> { - pub fn new<'a>( - bcx: &BuildContext<'a, 'cfg>, - default_kind: CompileKind, - ) -> CargoResult> { + pub fn new<'a>(bcx: &BuildContext<'a, 'cfg>) -> CargoResult> { let mut rustc = bcx.rustc().process(); - - let mut primary_unit_rustc_process = bcx.build_config.primary_unit_rustc.clone(); + let mut primary_rustc_process = bcx.build_config.primary_unit_rustc.clone(); + let mut rustc_workspace_wrapper_process = bcx.rustc().workspace_process(); if bcx.config.extra_verbose() { rustc.display_env_vars(); + rustc_workspace_wrapper_process.display_env_vars(); - if let Some(rustc) = primary_unit_rustc_process.as_mut() { + if let Some(rustc) = primary_rustc_process.as_mut() { rustc.display_env_vars(); } } @@ -99,56 +105,77 @@ Ok(Compilation { // TODO: deprecated; remove. native_dirs: BTreeSet::new(), - root_output: PathBuf::from("/"), - deps_output: PathBuf::from("/"), - host_deps_output: PathBuf::from("/"), - host_dylib_path: bcx + root_output: HashMap::new(), + deps_output: HashMap::new(), + sysroot_host_libdir: bcx .target_data .info(CompileKind::Host) .sysroot_host_libdir .clone(), - target_dylib_path: bcx - .target_data - .info(default_kind) - .sysroot_target_libdir - .clone(), + sysroot_target_libdir: bcx + .build_config + .requested_kinds + .iter() + .chain(Some(&CompileKind::Host)) + .map(|kind| { + ( + *kind, + bcx.target_data.info(*kind).sysroot_target_libdir.clone(), + ) + }) + .collect(), tests: Vec::new(), binaries: Vec::new(), + cdylibs: Vec::new(), extra_env: HashMap::new(), to_doc_test: Vec::new(), cfgs: HashMap::new(), rustdocflags: HashMap::new(), config: bcx.config, - rustc_process: rustc, - primary_unit_rustc_process, host: bcx.host_triple().to_string(), - target: bcx.target_data.short_name(&default_kind).to_string(), - target_runner: target_runner(bcx, default_kind)?, + rustc_process: rustc, + rustc_workspace_wrapper_process, + primary_rustc_process, + target_runners: bcx + .build_config + .requested_kinds + .iter() + .chain(Some(&CompileKind::Host)) + .map(|kind| Ok((*kind, target_runner(bcx, *kind)?))) + .collect::>>()?, }) } /// See `process`. 
- pub fn rustc_process(&self, pkg: &Package, is_primary: bool) -> CargoResult { - let rustc = if is_primary { - self.primary_unit_rustc_process - .clone() - .unwrap_or_else(|| self.rustc_process.clone()) + pub fn rustc_process( + &self, + unit: &Unit, + is_primary: bool, + is_workspace: bool, + ) -> CargoResult { + let rustc = if is_primary && self.primary_rustc_process.is_some() { + self.primary_rustc_process.clone().unwrap() + } else if is_workspace { + self.rustc_workspace_wrapper_process.clone() } else { self.rustc_process.clone() }; - self.fill_env(rustc, pkg, true) + let cmd = fill_rustc_tool_env(rustc, unit); + self.fill_env(cmd, &unit.pkg, unit.kind, true) } /// See `process`. - pub fn rustdoc_process(&self, pkg: &Package, target: &Target) -> CargoResult { - let mut p = self.fill_env(process(&*self.config.rustdoc()?), pkg, false)?; - if target.edition() != Edition::Edition2015 { - p.arg(format!("--edition={}", target.edition())); + pub fn rustdoc_process(&self, unit: &Unit) -> CargoResult { + let rustdoc = process(&*self.config.rustdoc()?); + let cmd = fill_rustc_tool_env(rustdoc, unit); + let mut p = self.fill_env(cmd, &unit.pkg, unit.kind, true)?; + if unit.target.edition() != Edition::Edition2015 { + p.arg(format!("--edition={}", unit.target.edition())); } - for crate_type in target.rustc_crate_types() { - p.arg("--crate-type").arg(crate_type); + for crate_type in unit.target.rustc_crate_types() { + p.arg("--crate-type").arg(crate_type.as_str()); } Ok(p) @@ -160,20 +187,21 @@ cmd: T, pkg: &Package, ) -> CargoResult { - self.fill_env(process(cmd), pkg, true) + self.fill_env(process(cmd), pkg, CompileKind::Host, false) } - pub fn target_runner(&self) -> &Option<(PathBuf, Vec)> { - &self.target_runner + pub fn target_runner(&self, kind: CompileKind) -> Option<&(PathBuf, Vec)> { + self.target_runners.get(&kind).and_then(|x| x.as_ref()) } /// See `process`. pub fn target_process>( &self, cmd: T, + kind: CompileKind, pkg: &Package, ) -> CargoResult { - let builder = if let Some((ref runner, ref args)) = *self.target_runner() { + let builder = if let Some((runner, args)) = self.target_runner(kind) { let mut builder = process(runner); builder.args(args); builder.arg(cmd); @@ -181,7 +209,7 @@ } else { process(cmd) }; - self.fill_env(builder, pkg, false) + self.fill_env(builder, pkg, kind, false) } /// Prepares a new process with an appropriate environment to run against @@ -193,26 +221,28 @@ &self, mut cmd: ProcessBuilder, pkg: &Package, - is_host: bool, + kind: CompileKind, + is_rustc_tool: bool, ) -> CargoResult { - let mut search_path = if is_host { - let mut search_path = vec![self.host_deps_output.clone()]; - search_path.push(self.host_dylib_path.clone()); - search_path + let mut search_path = Vec::new(); + if is_rustc_tool { + search_path.push(self.deps_output[&CompileKind::Host].clone()); + search_path.push(self.sysroot_host_libdir.clone()); } else { - let mut search_path = - super::filter_dynamic_search_path(self.native_dirs.iter(), &self.root_output); - search_path.push(self.deps_output.clone()); - search_path.push(self.root_output.clone()); + search_path.extend(super::filter_dynamic_search_path( + self.native_dirs.iter(), + &self.root_output[&kind], + )); + search_path.push(self.deps_output[&kind].clone()); + search_path.push(self.root_output[&kind].clone()); // For build-std, we don't want to accidentally pull in any shared // libs from the sysroot that ships with rustc. 
This may not be // required (at least I cannot craft a situation where it // matters), but is here to be safe. if self.config.cli_unstable().build_std.is_none() { - search_path.push(self.target_dylib_path.clone()); + search_path.push(self.sysroot_target_libdir[&kind].clone()); } - search_path - }; + } let dylib_path = util::dylib_path(); let dylib_path_is_empty = dylib_path.is_empty(); @@ -267,12 +297,30 @@ "CARGO_PKG_REPOSITORY", metadata.repository.as_ref().unwrap_or(&String::new()), ) + .env( + "CARGO_PKG_LICENSE", + metadata.license.as_ref().unwrap_or(&String::new()), + ) + .env( + "CARGO_PKG_LICENSE_FILE", + metadata.license_file.as_ref().unwrap_or(&String::new()), + ) .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":")) .cwd(pkg.root()); Ok(cmd) } } +/// Prepares a rustc_tool process with additional environment variables +/// that are only relevant in a context that has a unit +fn fill_rustc_tool_env(mut cmd: ProcessBuilder, unit: &Unit) -> ProcessBuilder { + if unit.target.is_bin() { + cmd.env("CARGO_BIN_NAME", unit.target.name()); + } + cmd.env("CARGO_CRATE_NAME", unit.target.crate_name()); + cmd +} + fn pre_version_component(v: &Version) -> String { if v.pre.is_empty() { return String::new(); diff -Nru cargo-0.44.1/src/cargo/core/compiler/compile_kind.rs cargo-0.47.0/src/cargo/core/compiler/compile_kind.rs --- cargo-0.44.1/src/cargo/core/compiler/compile_kind.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/compile_kind.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,10 @@ -use crate::core::{InternedString, Target}; +use crate::core::Target; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; +use crate::util::Config; +use anyhow::bail; use serde::Serialize; +use std::collections::BTreeSet; use std::path::Path; /// Indicator for how a unit is being compiled. @@ -9,7 +13,7 @@ /// compilations, where cross compilations happen at the request of `--target` /// and host compilations happen for things like build scripts and procedural /// macros. -#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord, Serialize)] +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord)] pub enum CompileKind { /// Attached to a unit that is compiled for the "host" system or otherwise /// is compiled without a `--target` flag. This is used for procedural @@ -39,6 +43,56 @@ CompileKind::Target(n) => CompileKind::Target(n), } } + + /// Creates a new list of `CompileKind` based on the requested list of + /// targets. + /// + /// If no targets are given then this returns a single-element vector with + /// `CompileKind::Host`. + pub fn from_requested_targets( + config: &Config, + targets: &[String], + ) -> CargoResult> { + if targets.len() > 1 && !config.cli_unstable().multitarget { + bail!("specifying multiple `--target` flags requires `-Zmultitarget`") + } + if !targets.is_empty() { + return Ok(targets + .iter() + .map(|value| Ok(CompileKind::Target(CompileTarget::new(value)?))) + // First collect into a set to deduplicate any `--target` passed + // more than once... + .collect::>>()? + // ... then generate a flat list for everything else to use. + .into_iter() + .collect()); + } + let kind = match &config.build_config()?.target { + Some(val) => { + let value = if val.raw_value().ends_with(".json") { + let path = val.clone().resolve_path(config); + path.to_str().expect("must be utf-8 in toml").to_string() + } else { + val.raw_value().to_string() + }; + CompileKind::Target(CompileTarget::new(&value)?) 
+ } + None => CompileKind::Host, + }; + Ok(vec![kind]) + } +} + +impl serde::ser::Serialize for CompileKind { + fn serialize(&self, s: S) -> Result + where + S: serde::ser::Serializer, + { + match self { + CompileKind::Host => None::<&str>.serialize(s), + CompileKind::Target(t) => Some(t.name).serialize(s), + } + } } /// Abstraction for the representation of a compilation target that Cargo has. diff -Nru cargo-0.44.1/src/cargo/core/compiler/context/compilation_files.rs cargo-0.47.0/src/cargo/core/compiler/context/compilation_files.rs --- cargo-0.44.1/src/cargo/core/compiler/context/compilation_files.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/context/compilation_files.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::env; use std::fmt; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -9,18 +9,33 @@ use log::info; use super::{BuildContext, CompileKind, Context, FileFlavor, Layout}; -use crate::core::compiler::{CompileMode, CompileTarget, Unit}; +use crate::core::compiler::{CompileMode, CompileTarget, CrateType, FileType, Unit}; use crate::core::{Target, TargetKind, Workspace}; -use crate::util::{self, CargoResult}; +use crate::util::{self, CargoResult, StableHasher}; -/// The `Metadata` is a hash used to make unique file names for each unit in a build. +/// This is a generic version number that can be changed to make +/// backwards-incompatible changes to any file structures in the output +/// directory. For example, the fingerprint files or the build-script +/// output files. Normally cargo updates ship with rustc updates which will +/// cause a new hash due to the rustc version changing, but this allows +/// cargo to be extra careful to deal with different versions of cargo that +/// use the same rustc version. +const METADATA_VERSION: u8 = 2; + +/// The `Metadata` is a hash used to make unique file names for each unit in a +/// build. It is also use for symbol mangling. +/// /// For example: /// - A project may depend on crate `A` and crate `B`, so the package name must be in the file name. /// - Similarly a project may depend on two versions of `A`, so the version must be in the file name. +/// /// In general this must include all things that need to be distinguished in different parts of /// the same build. This is absolutely required or we override things before /// we get chance to use them. /// +/// It is also used for symbol mangling, because if you have two versions of +/// the same crate linked together, their symbols need to be differentiated. +/// /// We use a hash because it is an easy way to guarantee /// that all the inputs can be converted to a valid path. /// @@ -39,6 +54,15 @@ /// more space than needed. This makes not including something in `Metadata` /// a form of cache invalidation. /// +/// You should also avoid anything that would interfere with reproducible +/// builds. For example, *any* absolute path should be avoided. This is one +/// reason that `RUSTFLAGS` is not in `Metadata`, because it often has +/// absolute paths (like `--remap-path-prefix` which is fundamentally used for +/// reproducible builds and has absolute paths in it). Also, in some cases the +/// mangled symbols need to be stable between different builds with different +/// settings. For example, profile-guided optimizations need to swap +/// `RUSTFLAGS` between runs, but needs to keep the same symbol names. 
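The new `Serialize` impl for `CompileKind` in the `compile_kind.rs` hunk above emits JSON `null` for host units and the target triple otherwise. A minimal stand-alone sketch of the same pattern, assuming the `serde` and `serde_json` crates are available; the `Kind` type here is illustrative, not Cargo's:

```rust
// Minimal stand-in for the custom `Serialize` impl above: `Host` becomes JSON
// `null`, a target becomes its triple string.
enum Kind<'a> {
    Host,
    Target(&'a str),
}

impl serde::Serialize for Kind<'_> {
    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        match self {
            Kind::Host => None::<&str>.serialize(s),
            Kind::Target(t) => Some(*t).serialize(s),
        }
    }
}

fn main() {
    let kinds = vec![Kind::Host, Kind::Target("x86_64-unknown-linux-gnu")];
    // Prints: [null,"x86_64-unknown-linux-gnu"]
    println!("{}", serde_json::to_string(&kinds).unwrap());
}
```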
+/// /// Note that the `Fingerprint` is in charge of tracking everything needed to determine if a /// rebuild is needed. #[derive(Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] @@ -67,15 +91,12 @@ export_dir: Option, /// The root targets requested by the user on the command line (does not /// include dependencies). - roots: Vec>, + roots: Vec, ws: &'a Workspace<'cfg>, /// Metadata hash to use for each unit. - /// - /// `None` if the unit should not use a metadata data hash (like rustdoc, - /// or some dylibs). - metas: HashMap, Option>, + metas: HashMap>, /// For each Unit, a list all files produced. - outputs: HashMap, LazyCell>>>, + outputs: HashMap>>>, } /// Info about a single file emitted by the compiler. @@ -104,15 +125,12 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { pub(super) fn new( - roots: &[Unit<'a>], + cx: &Context<'a, 'cfg>, host: Layout, target: HashMap, - export_dir: Option, - ws: &'a Workspace<'cfg>, - cx: &Context<'a, 'cfg>, ) -> CompilationFiles<'a, 'cfg> { let mut metas = HashMap::new(); - for unit in roots { + for unit in &cx.bcx.roots { metadata_of(unit, cx, &mut metas); } let outputs = metas @@ -121,11 +139,11 @@ .map(|unit| (unit, LazyCell::new())) .collect(); CompilationFiles { - ws, + ws: cx.bcx.ws, host, target, - export_dir, - roots: roots.to_vec(), + export_dir: cx.bcx.build_config.export_dir.clone(), + roots: cx.bcx.roots.clone(), metas, outputs, } @@ -139,27 +157,26 @@ } } - /// Gets the metadata for a target in a specific profile. - /// We build to the path `"{filename}-{target_metadata}"`. - /// We use a linking step to link/copy to a predictable filename - /// like `target/debug/libfoo.{a,so,rlib}` and such. + /// Gets the metadata for the given unit. + /// + /// See module docs for more details. /// - /// Returns `None` if the unit should not use a metadata data hash (like + /// Returns `None` if the unit should not use a metadata hash (like /// rustdoc, or some dylibs). - pub fn metadata(&self, unit: &Unit<'a>) -> Option { + pub fn metadata(&self, unit: &Unit) -> Option { self.metas[unit] } /// Gets the short hash based only on the `PackageId`. /// Used for the metadata when `metadata` returns `None`. - pub fn target_short_hash(&self, unit: &Unit<'_>) -> String { + pub fn target_short_hash(&self, unit: &Unit) -> String { let hashable = unit.pkg.package_id().stable_hash(self.ws.root()); - util::short_hash(&hashable) + util::short_hash(&(METADATA_VERSION, hashable)) } - /// Returns the appropriate output directory for the specified package and - /// target. - pub fn out_dir(&self, unit: &Unit<'a>) -> PathBuf { + /// Returns the directory where the artifacts for the given unit are + /// initially created. + pub fn out_dir(&self, unit: &Unit) -> PathBuf { if unit.mode.is_doc() { self.layout(unit.kind).doc().to_path_buf() } else if unit.mode.is_doc_test() { @@ -179,7 +196,10 @@ } /// Directory name to use for a package in the form `NAME-HASH`. - pub fn pkg_dir(&self, unit: &Unit<'a>) -> String { + /// + /// Note that some units may share the same directory, so care should be + /// taken in those cases! + fn pkg_dir(&self, unit: &Unit) -> String { let name = unit.pkg.package_id().name(); match self.metas[unit] { Some(ref meta) => format!("{}-{}", name, meta), @@ -199,24 +219,44 @@ /// Returns the directories where Rust crate dependencies are found for the /// specified unit. 
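The long comment above spells out what feeds the `Metadata` hash and why (unit identity goes in; absolute paths and `RUSTFLAGS` stay out for reproducibility). As a rough illustration of how a short `NAME-HASH` stem could be derived, here is a sketch using the standard `DefaultHasher`; the real code uses Cargo's `StableHasher` and `util::short_hash`, and hashes many more inputs (features, profile, dependency metadata, rustc version, and so on):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Sketch of deriving a `NAME-HASH` stem. `DefaultHasher` is only for
// illustration; it is not guaranteed stable across Rust releases, which is
// exactly why Cargo carries its own `StableHasher`.
fn short_hash<H: Hash>(value: &H) -> String {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    format!("{:016x}", hasher.finish())
}

fn main() {
    const METADATA_VERSION: u8 = 2;
    // Everything that must distinguish two units feeds the hash.
    let meta = short_hash(&(METADATA_VERSION, "foo", "1.2.3", "x86_64-unknown-linux-gnu"));
    println!("foo-{}", meta); // e.g. `target/debug/deps/foo-<hash>.rlib`
}
```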
- pub fn deps_dir(&self, unit: &Unit<'_>) -> &Path { + pub fn deps_dir(&self, unit: &Unit) -> &Path { self.layout(unit.kind).deps() } /// Directory where the fingerprint for the given unit should go. - pub fn fingerprint_dir(&self, unit: &Unit<'a>) -> PathBuf { + pub fn fingerprint_dir(&self, unit: &Unit) -> PathBuf { let dir = self.pkg_dir(unit); self.layout(unit.kind).fingerprint().join(dir) } + /// Returns the path for a file in the fingerprint directory. + /// + /// The "prefix" should be something to distinguish the file from other + /// files in the fingerprint directory. + pub fn fingerprint_file_path(&self, unit: &Unit, prefix: &str) -> PathBuf { + // Different targets need to be distinguished in the + let kind = unit.target.kind().description(); + let flavor = if unit.mode.is_any_test() { + "test-" + } else if unit.mode.is_doc() { + "doc-" + } else if unit.mode.is_run_custom_build() { + "run-" + } else { + "" + }; + let name = format!("{}{}{}-{}", prefix, flavor, kind, unit.target.name()); + self.fingerprint_dir(unit).join(name) + } + /// Path where compiler output is cached. - pub fn message_cache_path(&self, unit: &Unit<'a>) -> PathBuf { - self.fingerprint_dir(unit).join("output") + pub fn message_cache_path(&self, unit: &Unit) -> PathBuf { + self.fingerprint_file_path(unit, "output-") } /// Returns the directory where a compiled build script is stored. /// `/path/to/target/{debug,release}/build/PKG-HASH` - pub fn build_script_dir(&self, unit: &Unit<'a>) -> PathBuf { + pub fn build_script_dir(&self, unit: &Unit) -> PathBuf { assert!(unit.target.is_custom_build()); assert!(!unit.mode.is_run_custom_build()); assert!(self.metas.contains_key(unit)); @@ -227,7 +267,7 @@ /// Returns the directory where information about running a build script /// is stored. /// `/path/to/target/{debug,release}/build/PKG-HASH` - pub fn build_script_run_dir(&self, unit: &Unit<'a>) -> PathBuf { + pub fn build_script_run_dir(&self, unit: &Unit) -> PathBuf { assert!(unit.target.is_custom_build()); assert!(unit.mode.is_run_custom_build()); let dir = self.pkg_dir(unit); @@ -236,18 +276,10 @@ /// Returns the "OUT_DIR" directory for running a build script. /// `/path/to/target/{debug,release}/build/PKG-HASH/out` - pub fn build_script_out_dir(&self, unit: &Unit<'a>) -> PathBuf { + pub fn build_script_out_dir(&self, unit: &Unit) -> PathBuf { self.build_script_run_dir(unit).join("out") } - /// Returns the file stem for a given target/profile combo (with metadata). - pub fn file_stem(&self, unit: &Unit<'a>) -> String { - match self.metas[unit] { - Some(ref metadata) => format!("{}-{}", unit.target.crate_name(), metadata), - None => self.bin_stem(unit), - } - } - /// Returns the path to the executable binary for the given bin target. /// /// This should only to be used when a `Unit` is not available. @@ -260,13 +292,12 @@ assert!(target.is_bin()); let dest = self.layout(kind).dest(); let info = bcx.target_data.info(kind); - let file_types = info - .file_types( - "bin", - FileFlavor::Normal, + let (file_types, _) = info + .rustc_outputs( + CompileMode::Build, &TargetKind::Bin, bcx.target_data.short_name(&kind), - )? + ) .expect("target must support `bin`"); let file_type = file_types @@ -274,13 +305,15 @@ .find(|file_type| file_type.flavor == FileFlavor::Normal) .expect("target must support `bin`"); - Ok(dest.join(file_type.filename(target.name()))) + Ok(dest.join(file_type.uplift_filename(target))) } /// Returns the filenames that the given unit will generate. 
+ /// + /// Note: It is not guaranteed that all of the files will be generated. pub(super) fn outputs( &self, - unit: &Unit<'a>, + unit: &Unit, bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult>> { self.outputs[unit] @@ -288,77 +321,58 @@ .map(Arc::clone) } - /// Returns the bin filename for a given target, without extension and metadata. - fn bin_stem(&self, unit: &Unit<'_>) -> String { - if unit.target.allows_underscores() { - unit.target.name().to_string() - } else { - unit.target.crate_name() - } - } - - /// Returns a tuple `(hard_link_dir, filename_stem)` for the primary - /// output file for the given unit. - /// - /// `hard_link_dir` is the directory where the file should be hard-linked - /// ("uplifted") to. For example, `/path/to/project/target`. + /// Returns the path where the output for the given unit and FileType + /// should be uplifted to. /// - /// `filename_stem` is the base filename without an extension. - /// - /// This function returns it in two parts so the caller can add - /// prefix/suffix to filename separately. - /// - /// Returns an `Option` because in some cases we don't want to link - /// (eg a dependent lib). - fn link_stem(&self, unit: &Unit<'a>) -> Option<(PathBuf, String)> { - let out_dir = self.out_dir(unit); - let bin_stem = self.bin_stem(unit); // Stem without metadata. - let file_stem = self.file_stem(unit); // Stem with metadata. - - // We currently only lift files up from the `deps` directory. If - // it was compiled into something like `example/` or `doc/` then - // we don't want to link it up. - if out_dir.ends_with("deps") { - // Don't lift up library dependencies. - if unit.target.is_bin() || self.roots.contains(unit) { - Some(( - out_dir.parent().unwrap().to_owned(), - if unit.mode.is_any_test() { - file_stem - } else { - bin_stem - }, - )) - } else { - None - } - } else if bin_stem == file_stem { - None - } else if out_dir.ends_with("examples") || out_dir.parent().unwrap().ends_with("build") { - Some((out_dir, bin_stem)) + /// Returns `None` if the unit shouldn't be uplifted (for example, a + /// dependent rlib). + fn uplift_to(&self, unit: &Unit, file_type: &FileType, from_path: &Path) -> Option { + // Tests, check, doc, etc. should not be uplifted. + if unit.mode != CompileMode::Build || file_type.flavor == FileFlavor::Rmeta { + return None; + } + // Only uplift: + // - Binaries: The user always wants to see these, even if they are + // implicitly built (for example for integration tests). + // - dylibs: This ensures that the dynamic linker pulls in all the + // latest copies (even if the dylib was built from a previous cargo + // build). There are complex reasons for this, see #8139, #6167, #6162. + // - Things directly requested from the command-line (the "roots"). + // This one is a little questionable for rlibs (see #6131), but is + // historically how Cargo has operated. This is primarily useful to + // give the user access to staticlibs and cdylibs. + if !unit.target.is_bin() + && !unit.target.is_custom_build() + && file_type.crate_type != Some(CrateType::Dylib) + && !self.roots.contains(unit) + { + return None; + } + + let filename = file_type.uplift_filename(&unit.target); + let uplift_path = if unit.target.is_example() { + // Examples live in their own little world. 
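The new `fingerprint_file_path` helper a little further up builds per-unit file names from a prefix, a mode flavor, the target kind, and the target name. A worked example of the resulting names, with the pieces passed in as plain strings rather than derived from Cargo's `Unit`:

```rust
// Worked example of the `fingerprint_file_path` naming scheme above, with the
// inputs flattened to plain strings (Cargo derives them from the `Unit`).
fn fingerprint_file_name(prefix: &str, mode: &str, kind: &str, target_name: &str) -> String {
    let flavor = match mode {
        "test" | "bench" => "test-",
        "doc" => "doc-",
        "run-custom-build" => "run-",
        _ => "",
    };
    format!("{}{}{}-{}", prefix, flavor, kind, target_name)
}

fn main() {
    // The compiler message cache for a library's test unit ends up at
    // something like `.../.fingerprint/foo-<hash>/output-test-lib-foo`.
    assert_eq!(
        fingerprint_file_name("output-", "test", "lib", "foo"),
        "output-test-lib-foo"
    );
    // A plain build of the same library: `output-lib-foo`.
    assert_eq!(
        fingerprint_file_name("output-", "build", "lib", "foo"),
        "output-lib-foo"
    );
}
```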
+ self.layout(unit.kind).examples().join(filename) + } else if unit.target.is_custom_build() { + self.build_script_dir(unit).join(filename) } else { - None + self.layout(unit.kind).dest().join(filename) + }; + if from_path == uplift_path { + // This can happen with things like examples that reside in the + // same directory, do not have a metadata hash (like on Windows), + // and do not have hyphens. + return None; } + Some(uplift_path) } fn calc_outputs( &self, - unit: &Unit<'a>, + unit: &Unit, bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult>> { let ret = match unit.mode { - CompileMode::Check { .. } => { - // This may be confusing. rustc outputs a file named `lib*.rmeta` - // for both libraries and binaries. - let file_stem = self.file_stem(unit); - let path = self.out_dir(unit).join(format!("lib{}.rmeta", file_stem)); - vec![OutputFile { - path, - hardlink: None, - export_path: None, - flavor: FileFlavor::Linkable { rmeta: false }, - }] - } CompileMode::Doc { .. } => { let path = self .out_dir(unit) @@ -382,134 +396,84 @@ // but Cargo does not know about that. vec![] } - CompileMode::Test | CompileMode::Build | CompileMode::Bench => { - self.calc_outputs_rustc(unit, bcx)? - } + CompileMode::Test + | CompileMode::Build + | CompileMode::Bench + | CompileMode::Check { .. } => self.calc_outputs_rustc(unit, bcx)?, }; info!("Target filenames: {:?}", ret); Ok(Arc::new(ret)) } + /// Computes the actual, full pathnames for all the files generated by rustc. + /// + /// The `OutputFile` also contains the paths where those files should be + /// "uplifted" to. fn calc_outputs_rustc( &self, - unit: &Unit<'a>, + unit: &Unit, bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult> { - let mut ret = Vec::new(); - let mut unsupported = Vec::new(); - let out_dir = self.out_dir(unit); - let link_stem = self.link_stem(unit); - let info = bcx.target_data.info(unit.kind); - let file_stem = self.file_stem(unit); - let mut add = |crate_type: &str, flavor: FileFlavor| -> CargoResult<()> { - let crate_type = if crate_type == "lib" { - "rlib" - } else { - crate_type - }; - let file_types = info.file_types( - crate_type, - flavor, - unit.target.kind(), - bcx.target_data.short_name(&unit.kind), - )?; - - match file_types { - Some(types) => { - for file_type in types { - let path = out_dir.join(file_type.filename(&file_stem)); - let hardlink = link_stem - .as_ref() - .map(|&(ref ld, ref ls)| ld.join(file_type.filename(ls))); - let export_path = if unit.target.is_custom_build() { - None - } else { - self.export_dir.as_ref().and_then(|export_dir| { - hardlink - .as_ref() - .map(|hardlink| export_dir.join(hardlink.file_name().unwrap())) - }) - }; - ret.push(OutputFile { - path, - hardlink, - export_path, - flavor: file_type.flavor, - }); - } - } - // Not supported; don't worry about it. - None => { - unsupported.push(crate_type.to_string()); - } - } - Ok(()) - }; - match *unit.target.kind() { - TargetKind::Bin - | TargetKind::CustomBuild - | TargetKind::ExampleBin - | TargetKind::Bench - | TargetKind::Test => { - add("bin", FileFlavor::Normal)?; - } - TargetKind::Lib(..) | TargetKind::ExampleLib(..) 
if unit.mode.is_any_test() => { - add("bin", FileFlavor::Normal)?; - } - TargetKind::ExampleLib(ref kinds) | TargetKind::Lib(ref kinds) => { - for kind in kinds { - add( - kind.crate_type(), - if kind.linkable() { - FileFlavor::Linkable { rmeta: false } - } else { - FileFlavor::Normal - }, - )?; - } - let path = out_dir.join(format!("lib{}.rmeta", file_stem)); - if !unit.requires_upstream_objects() { - ret.push(OutputFile { - path, - hardlink: None, - export_path: None, - flavor: FileFlavor::Linkable { rmeta: true }, - }); - } - } - } - if ret.is_empty() { + let info = bcx.target_data.info(unit.kind); + let triple = bcx.target_data.short_name(&unit.kind); + let (file_types, unsupported) = + info.rustc_outputs(unit.mode, unit.target.kind(), triple)?; + if file_types.is_empty() { if !unsupported.is_empty() { + let unsupported_strs: Vec<_> = unsupported.iter().map(|ct| ct.as_str()).collect(); anyhow::bail!( "cannot produce {} for `{}` as the target `{}` \ does not support these crate types", - unsupported.join(", "), + unsupported_strs.join(", "), unit.pkg, - bcx.target_data.short_name(&unit.kind), + triple, ) } anyhow::bail!( "cannot compile `{}` as the target `{}` does not \ support any of the output crate types", unit.pkg, - bcx.target_data.short_name(&unit.kind), + triple, ); } - Ok(ret) + + // Convert FileType to OutputFile. + let mut outputs = Vec::new(); + for file_type in file_types { + let meta = self.metadata(unit).map(|m| m.to_string()); + let path = out_dir.join(file_type.output_filename(&unit.target, meta.as_deref())); + let hardlink = self.uplift_to(unit, &file_type, &path); + let export_path = if unit.target.is_custom_build() { + None + } else { + self.export_dir.as_ref().and_then(|export_dir| { + hardlink + .as_ref() + .map(|hardlink| export_dir.join(hardlink.file_name().unwrap())) + }) + }; + outputs.push(OutputFile { + path, + hardlink, + export_path, + flavor: file_type.flavor, + }); + } + Ok(outputs) } } -fn metadata_of<'a, 'cfg>( - unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, - metas: &mut HashMap, Option>, +fn metadata_of( + unit: &Unit, + cx: &Context<'_, '_>, + metas: &mut HashMap>, ) -> Option { if !metas.contains_key(unit) { let meta = compute_metadata(unit, cx, metas); - metas.insert(*unit, meta); + metas.insert(unit.clone(), meta); for dep in cx.unit_deps(unit) { metadata_of(&dep.unit, cx, metas); } @@ -517,61 +481,18 @@ metas[unit] } -fn compute_metadata<'a, 'cfg>( - unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, - metas: &mut HashMap, Option>, +fn compute_metadata( + unit: &Unit, + cx: &Context<'_, '_>, + metas: &mut HashMap>, ) -> Option { - if unit.mode.is_doc_test() { - // Doc tests do not have metadata. - return None; - } - // No metadata for dylibs because of a couple issues: - // - macOS encodes the dylib name in the executable, - // - Windows rustc multiple files of which we can't easily link all of them. - // - // No metadata for bin because of an issue: - // - wasm32 rustc/emcc encodes the `.wasm` name in the `.js` (rust-lang/cargo#4535). - // - msvc: The path to the PDB is embedded in the executable, and we don't - // want the PDB path to include the hash in it. - // - // Two exceptions: - // 1) Upstream dependencies (we aren't exporting + need to resolve name conflict), - // 2) `__CARGO_DEFAULT_LIB_METADATA` env var. - // - // Note, however, that the compiler's build system at least wants - // path dependencies (eg libstd) to have hashes in filenames. 
To account for - // that we have an extra hack here which reads the - // `__CARGO_DEFAULT_LIB_METADATA` environment variable and creates a - // hash in the filename if that's present. - // - // This environment variable should not be relied on! It's - // just here for rustbuild. We need a more principled method - // doing this eventually. let bcx = &cx.bcx; - let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA"); - let short_name = bcx.target_data.short_name(&unit.kind); - if !(unit.mode.is_any_test() || unit.mode.is_check()) - && (unit.target.is_dylib() - || unit.target.is_cdylib() - || (unit.target.is_executable() && short_name.starts_with("wasm32-")) - || (unit.target.is_executable() && short_name.contains("msvc"))) - && unit.pkg.package_id().source_id().is_path() - && __cargo_default_lib_metadata.is_err() - { + if !should_use_metadata(bcx, unit) { return None; } + let mut hasher = StableHasher::new(); - let mut hasher = SipHasher::new(); - - // This is a generic version number that can be changed to make - // backwards-incompatible changes to any file structures in the output - // directory. For example, the fingerprint files or the build-script - // output files. Normally cargo updates ship with rustc updates which will - // cause a new hash due to the rustc version changing, but this allows - // cargo to be extra careful to deal with different versions of cargo that - // use the same rustc version. - 1.hash(&mut hasher); + METADATA_VERSION.hash(&mut hasher); // Unique metadata per (name, source, version) triple. This'll allow us // to pull crates from anywhere without worrying about conflicts. @@ -609,19 +530,19 @@ unit.target.name().hash(&mut hasher); unit.target.kind().hash(&mut hasher); - bcx.rustc().verbose_version.hash(&mut hasher); + hash_rustc_version(bcx, &mut hasher); - if cx.is_primary_package(unit) { + if cx.bcx.ws.is_member(&unit.pkg) { // This is primarily here for clippy. This ensures that the clippy // artifacts are separate from the `check` ones. - if let Some(proc) = &cx.bcx.build_config.primary_unit_rustc { - proc.get_program().hash(&mut hasher); + if let Some(path) = &cx.bcx.rustc().workspace_wrapper { + path.hash(&mut hasher); } } // Seed the contents of `__CARGO_DEFAULT_LIB_METADATA` to the hasher if present. // This should be the release channel, to get a different hash for each channel. - if let Ok(ref channel) = __cargo_default_lib_metadata { + if let Ok(ref channel) = env::var("__CARGO_DEFAULT_LIB_METADATA") { channel.hash(&mut hasher); } @@ -636,3 +557,85 @@ Some(Metadata(hasher.finish())) } + +fn hash_rustc_version(bcx: &BuildContext<'_, '_>, hasher: &mut StableHasher) { + let vers = &bcx.rustc().version; + if vers.pre.is_empty() || bcx.config.cli_unstable().separate_nightlies { + // For stable, keep the artifacts separate. This helps if someone is + // testing multiple versions, to avoid recompiles. + bcx.rustc().verbose_version.hash(hasher); + return; + } + // On "nightly"/"beta"/"dev"/etc, keep each "channel" separate. Don't hash + // the date/git information, so that whenever someone updates "nightly", + // they won't have a bunch of stale artifacts in the target directory. + // + // This assumes that the first segment is the important bit ("nightly", + // "beta", "dev", etc.). Skip other parts like the `.3` in `-beta.3`. + vers.pre[0].hash(hasher); + // Keep "host" since some people switch hosts to implicitly change + // targets, (like gnu vs musl or gnu vs msvc). 
In the future, we may want + // to consider hashing `unit.kind.short_name()` instead. + bcx.rustc().host.hash(hasher); + // None of the other lines are important. Currently they are: + // binary: rustc <-- or "rustdoc" + // commit-hash: 38114ff16e7856f98b2b4be7ab4cd29b38bed59a + // commit-date: 2020-03-21 + // host: x86_64-apple-darwin + // release: 1.44.0-nightly + // LLVM version: 9.0 + // + // The backend version ("LLVM version") might become more relevant in + // the future when cranelift sees more use, and people want to switch + // between different backends without recompiling. +} + +/// Returns whether or not this unit should use a metadata hash. +fn should_use_metadata(bcx: &BuildContext<'_, '_>, unit: &Unit) -> bool { + if unit.mode.is_doc_test() { + // Doc tests do not have metadata. + return false; + } + if unit.mode.is_any_test() || unit.mode.is_check() { + // These always use metadata. + return true; + } + // No metadata in these cases: + // + // - dylibs: + // - macOS encodes the dylib name in the executable, so it can't be renamed. + // - TODO: Are there other good reasons? If not, maybe this should be macos specific? + // - Windows MSVC executables: The path to the PDB is embedded in the + // executable, and we don't want the PDB path to include the hash in it. + // - wasm32 executables: When using emscripten, the path to the .wasm file + // is embedded in the .js file, so we don't want the hash in there. + // TODO: Is this necessary for wasm32-unknown-unknown? + // - apple executables: The executable name is used in the dSYM directory + // (such as `target/debug/foo.dSYM/Contents/Resources/DWARF/foo-64db4e4bf99c12dd`). + // Unfortunately this causes problems with our current backtrace + // implementation which looks for a file matching the exe name exactly. + // See https://github.com/rust-lang/rust/issues/72550#issuecomment-638501691 + // for more details. + // + // This is only done for local packages, as we don't expect to export + // dependencies. + // + // The __CARGO_DEFAULT_LIB_METADATA env var is used to override this to + // force metadata in the hash. This is only used for building libstd. For + // example, if libstd is placed in a common location, we don't want a file + // named /usr/lib/libstd.so which could conflict with other rustc + // installs. TODO: Is this still a realistic concern? 
+ // See https://github.com/rust-lang/cargo/issues/3005 + let short_name = bcx.target_data.short_name(&unit.kind); + if (unit.target.is_dylib() + || unit.target.is_cdylib() + || (unit.target.is_executable() && short_name.starts_with("wasm32-")) + || (unit.target.is_executable() && short_name.contains("msvc")) + || (unit.target.is_executable() && short_name.contains("-apple-"))) + && unit.pkg.package_id().source_id().is_path() + && env::var("__CARGO_DEFAULT_LIB_METADATA").is_err() + { + return false; + } + true +} diff -Nru cargo-0.44.1/src/cargo/core/compiler/context/mod.rs cargo-0.47.0/src/cargo/core/compiler/context/mod.rs --- cargo-0.44.1/src/cargo/core/compiler/context/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/context/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,4 +1,3 @@ -#![allow(deprecated)] use std::collections::{BTreeSet, HashMap, HashSet}; use std::path::PathBuf; use std::sync::{Arc, Mutex}; @@ -9,14 +8,15 @@ use crate::core::compiler::{self, compilation, Unit}; use crate::core::PackageId; use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::{profile, Config}; +use crate::util::profile; use super::build_plan::BuildPlan; use super::custom_build::{self, BuildDeps, BuildScriptOutputs, BuildScripts}; use super::fingerprint::Fingerprint; use super::job_queue::JobQueue; use super::layout::Layout; -use super::unit_dependencies::{UnitDep, UnitGraph}; +use super::lto::Lto; +use super::unit_graph::UnitDep; use super::{BuildContext, Compilation, CompileKind, CompileMode, Executor, FileFlavor}; mod compilation_files; @@ -34,26 +34,24 @@ /// Dependencies (like rerun-if-changed) declared by a build script. /// This is *only* populated from the output from previous runs. /// If the build script hasn't ever been run, then it must be run. - pub build_explicit_deps: HashMap, BuildDeps>, + pub build_explicit_deps: HashMap, /// Fingerprints used to detect if a unit is out-of-date. - pub fingerprints: HashMap, Arc>, + pub fingerprints: HashMap>, /// Cache of file mtimes to reduce filesystem hits. pub mtime_cache: HashMap, /// A set used to track which units have been compiled. /// A unit may appear in the job graph multiple times as a dependency of /// multiple packages, but it only needs to run once. - pub compiled: HashSet>, + pub compiled: HashSet, /// Linking information for each `Unit`. /// See `build_map` for details. - pub build_scripts: HashMap, Arc>, + pub build_scripts: HashMap>, /// Job server client to manage concurrency with other processes. pub jobserver: Client, /// "Primary" packages are the ones the user selected on the command-line /// with `-p` flags. If no flags are specified, then it is the defaults /// based on the current directory and the default workspace members. primary_packages: HashSet, - /// The dependency graph of units to compile. - unit_dependencies: UnitGraph<'a>, /// An abstraction of the files and directories that will be generated by /// the compilation. This is `None` until after `unit_dependencies` has /// been computed. @@ -68,21 +66,21 @@ /// A set of units which are compiling rlibs and are expected to produce /// metadata files in addition to the rlib itself. This is only filled in /// when `pipelining` above is enabled. - rmeta_required: HashSet>, + rmeta_required: HashSet, /// When we're in jobserver-per-rustc process mode, this keeps those /// jobserver clients for each Unit (which eventually becomes a rustc /// process). 
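`hash_rustc_version` above deliberately hashes only the pre-release channel (the first pre-release segment, e.g. `nightly` or `beta`) plus the host triple on non-stable toolchains, so updating a nightly does not strand a pile of artifacts keyed to the old date. A rough sketch of that idea with plain string handling; Cargo uses the parsed semver pre-release field and its `StableHasher`, so this helper is only illustrative:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// "1.44.0-nightly" -> Some("nightly"), "1.45.0-beta.3" -> Some("beta"),
// "1.44.0" -> None (stable). Illustrative helper, not Cargo's.
fn channel_of(release: &str) -> Option<&str> {
    release
        .split('-')
        .nth(1)
        .map(|pre| pre.split('.').next().unwrap())
}

fn main() {
    let mut hasher = DefaultHasher::new();
    match channel_of("1.44.0-nightly") {
        Some(channel) => {
            // Channel + host only: a newer nightly reuses the same hash.
            channel.hash(&mut hasher);
            "x86_64-unknown-linux-gnu".hash(&mut hasher);
        }
        // Stable hashes the full verbose version string (stand-in below) so
        // artifacts from different stable toolchains stay separate.
        None => "rustc 1.44.0 (full verbose version)".hash(&mut hasher),
    }
    println!("{:x}", hasher.finish());
}
```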
- pub rustc_clients: HashMap, Client>, + pub rustc_clients: HashMap, + + /// Map of the LTO-status of each unit. This indicates what sort of + /// compilation is happening (only object, only bitcode, both, etc), and is + /// precalculated early on. + pub lto: HashMap, } impl<'a, 'cfg> Context<'a, 'cfg> { - pub fn new( - config: &'cfg Config, - bcx: &'a BuildContext<'a, 'cfg>, - unit_dependencies: UnitGraph<'a>, - default_kind: CompileKind, - ) -> CargoResult { + pub fn new(bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult { // Load up the jobserver that we'll use to manage our parallelism. This // is the same as the GNU make implementation of a jobserver, and // intentionally so! It's hoped that we can interact with GNU make and @@ -91,7 +89,7 @@ // Note that if we don't have a jobserver in our environment then we // create our own, and we create it with `n` tokens, but immediately // acquire one, because one token is ourself, a running process. - let jobserver = match config.jobserver_from_env() { + let jobserver = match bcx.config.jobserver_from_env() { Some(c) => c.clone(), None => { let client = Client::new(bcx.build_config.jobs as usize) @@ -105,7 +103,7 @@ Ok(Self { bcx, - compilation: Compilation::new(bcx, default_kind)?, + compilation: Compilation::new(bcx)?, build_script_outputs: Arc::new(Mutex::new(BuildScriptOutputs::default())), fingerprints: HashMap::new(), mtime_cache: HashMap::new(), @@ -114,31 +112,27 @@ build_explicit_deps: HashMap::new(), jobserver, primary_packages: HashSet::new(), - unit_dependencies, files: None, rmeta_required: HashSet::new(), rustc_clients: HashMap::new(), pipelining, + lto: HashMap::new(), }) } /// Starts compilation, waits for it to finish, and returns information /// about the result of compilation. - pub fn compile( - mut self, - units: &[Unit<'a>], - export_dir: Option, - exec: &Arc, - ) -> CargoResult> { - let mut queue = JobQueue::new(self.bcx, units); + pub fn compile(mut self, exec: &Arc) -> CargoResult> { + let mut queue = JobQueue::new(self.bcx); let mut plan = BuildPlan::new(); let build_plan = self.bcx.build_config.build_plan; - self.prepare_units(export_dir, units)?; + self.prepare_units()?; self.prepare()?; - custom_build::build_map(&mut self, units)?; + custom_build::build_map(&mut self)?; + super::lto::generate(&mut self)?; self.check_collistions()?; - for unit in units.iter() { + for unit in &self.bcx.roots { // Build up a list of pending jobs, each of which represent // compiling a particular package. No actual work is executed as // part of this, that's all done next as part of the `execute` @@ -163,11 +157,11 @@ if build_plan { plan.set_inputs(self.build_plan_inputs()?); - plan.output_plan(); + plan.output_plan(self.bcx.config); } // Collect the result of the build into `self.compilation`. - for unit in units.iter() { + for unit in &self.bcx.roots { // Collect tests and executables. 
for output in self.outputs(unit)?.iter() { if output.flavor == FileFlavor::DebugInfo || output.flavor == FileFlavor::Auxiliary @@ -178,20 +172,26 @@ let bindst = output.bin_dst(); if unit.mode == CompileMode::Test { - self.compilation.tests.push(( - unit.pkg.clone(), - unit.target.clone(), - output.path.clone(), - )); + self.compilation + .tests + .push((unit.clone(), output.path.clone())); } else if unit.target.is_executable() { - self.compilation.binaries.push(bindst.clone()); + self.compilation + .binaries + .push((unit.clone(), bindst.clone())); + } else if unit.target.is_cdylib() { + if !self.compilation.cdylibs.iter().any(|(u, _)| u == unit) { + self.compilation + .cdylibs + .push((unit.clone(), bindst.clone())); + } } } // If the unit has a build script, add `OUT_DIR` to the // environment variables. if unit.target.is_lib() { - for dep in &self.unit_dependencies[unit] { + for dep in &self.bcx.unit_graph[unit] { if dep.unit.mode.is_run_custom_build() { let out_dir = self .files() @@ -212,10 +212,10 @@ let mut unstable_opts = false; let args = compiler::extern_args(&self, unit, &mut unstable_opts)?; self.compilation.to_doc_test.push(compilation::Doctest { - package: unit.pkg.clone(), - target: unit.target.clone(), + unit: unit.clone(), args, unstable_opts, + linker: self.bcx.linker(unit.kind), }); } @@ -266,9 +266,9 @@ } /// Returns the executable for the specified unit (if any). - pub fn get_executable(&mut self, unit: &Unit<'a>) -> CargoResult> { + pub fn get_executable(&mut self, unit: &Unit) -> CargoResult> { for output in self.outputs(unit)?.iter() { - if output.flavor == FileFlavor::DebugInfo { + if output.flavor != FileFlavor::Normal { continue; } @@ -282,25 +282,22 @@ Ok(None) } - pub fn prepare_units( - &mut self, - export_dir: Option, - units: &[Unit<'a>], - ) -> CargoResult<()> { + pub fn prepare_units(&mut self) -> CargoResult<()> { let dest = self.bcx.profiles.get_dir_name(); let host_layout = Layout::new(self.bcx.ws, None, &dest)?; let mut targets = HashMap::new(); - if let CompileKind::Target(target) = self.bcx.build_config.requested_kind { - let layout = Layout::new(self.bcx.ws, Some(target), &dest)?; - targets.insert(target, layout); + for kind in self.bcx.build_config.requested_kinds.iter() { + if let CompileKind::Target(target) = *kind { + let layout = Layout::new(self.bcx.ws, Some(target), &dest)?; + targets.insert(target, layout); + } } self.primary_packages - .extend(units.iter().map(|u| u.pkg.package_id())); + .extend(self.bcx.roots.iter().map(|u| u.pkg.package_id())); self.record_units_requiring_metadata(); - let files = - CompilationFiles::new(units, host_layout, targets, export_dir, self.bcx.ws, self); + let files = CompilationFiles::new(self, host_layout, targets); self.files = Some(files); Ok(()) } @@ -320,12 +317,22 @@ .chain_err(|| "couldn't prepare build directories")?; } - self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf(); - let files = self.files.as_ref().unwrap(); - let layout = files.layout(self.bcx.build_config.requested_kind); - self.compilation.root_output = layout.dest().to_path_buf(); - self.compilation.deps_output = layout.deps().to_path_buf(); + for &kind in self + .bcx + .build_config + .requested_kinds + .iter() + .chain(Some(&CompileKind::Host)) + { + let layout = files.layout(kind); + self.compilation + .root_output + .insert(kind, layout.dest().to_path_buf()); + self.compilation + .deps_output + .insert(kind, layout.deps().to_path_buf()); + } Ok(()) } @@ -338,49 +345,49 @@ } /// Returns the filenames that the 
given unit will generate. - pub fn outputs(&self, unit: &Unit<'a>) -> CargoResult>> { + pub fn outputs(&self, unit: &Unit) -> CargoResult>> { self.files.as_ref().unwrap().outputs(unit, self.bcx) } /// Direct dependencies for the given unit. - pub fn unit_deps(&self, unit: &Unit<'a>) -> &[UnitDep<'a>] { - &self.unit_dependencies[unit] + pub fn unit_deps(&self, unit: &Unit) -> &[UnitDep] { + &self.bcx.unit_graph[unit] } /// Returns the RunCustomBuild Unit associated with the given Unit. /// /// If the package does not have a build script, this returns None. - pub fn find_build_script_unit(&self, unit: Unit<'a>) -> Option> { + pub fn find_build_script_unit(&self, unit: Unit) -> Option { if unit.mode.is_run_custom_build() { return Some(unit); } - self.unit_dependencies[&unit] + self.bcx.unit_graph[&unit] .iter() .find(|unit_dep| { unit_dep.unit.mode.is_run_custom_build() && unit_dep.unit.pkg.package_id() == unit.pkg.package_id() }) - .map(|unit_dep| unit_dep.unit) + .map(|unit_dep| unit_dep.unit.clone()) } /// Returns the metadata hash for the RunCustomBuild Unit associated with /// the given unit. /// /// If the package does not have a build script, this returns None. - pub fn find_build_script_metadata(&self, unit: Unit<'a>) -> Option { + pub fn find_build_script_metadata(&self, unit: Unit) -> Option { let script_unit = self.find_build_script_unit(unit)?; Some(self.get_run_build_script_metadata(&script_unit)) } /// Returns the metadata hash for a RunCustomBuild unit. - pub fn get_run_build_script_metadata(&self, unit: &Unit<'a>) -> Metadata { + pub fn get_run_build_script_metadata(&self, unit: &Unit) -> Metadata { assert!(unit.mode.is_run_custom_build()); self.files() .metadata(unit) .expect("build script should always have hash") } - pub fn is_primary_package(&self, unit: &Unit<'a>) -> bool { + pub fn is_primary_package(&self, unit: &Unit) -> bool { self.primary_packages.contains(&unit.pkg.package_id()) } @@ -390,7 +397,7 @@ // Keep sorted for consistency. let mut inputs = BTreeSet::new(); // Note: dev-deps are skipped if they are not present in the unit graph. 
- for unit in self.unit_dependencies.keys() { + for unit in self.bcx.unit_graph.keys() { inputs.insert(unit.pkg.manifest_path().to_path_buf()); } Ok(inputs.into_iter().collect()) @@ -398,21 +405,20 @@ fn check_collistions(&self) -> CargoResult<()> { let mut output_collisions = HashMap::new(); - let describe_collision = - |unit: &Unit<'_>, other_unit: &Unit<'_>, path: &PathBuf| -> String { - format!( - "The {} target `{}` in package `{}` has the same output \ + let describe_collision = |unit: &Unit, other_unit: &Unit, path: &PathBuf| -> String { + format!( + "The {} target `{}` in package `{}` has the same output \ filename as the {} target `{}` in package `{}`.\n\ Colliding filename is: {}\n", - unit.target.kind().description(), - unit.target.name(), - unit.pkg.package_id(), - other_unit.target.kind().description(), - other_unit.target.name(), - other_unit.pkg.package_id(), - path.display() - ) - }; + unit.target.kind().description(), + unit.target.name(), + unit.pkg.package_id(), + other_unit.target.kind().description(), + other_unit.target.name(), + other_unit.pkg.package_id(), + path.display() + ) + }; let suggestion = "Consider changing their names to be unique or compiling them separately.\n\ This may become a hard error in the future; see \ @@ -420,8 +426,8 @@ let rustdoc_suggestion = "This is a known bug where multiple crates with the same name use\n\ the same path; see ."; - let report_collision = |unit: &Unit<'_>, - other_unit: &Unit<'_>, + let report_collision = |unit: &Unit, + other_unit: &Unit, path: &PathBuf, suggestion: &str| -> CargoResult<()> { @@ -457,7 +463,8 @@ }; let mut keys = self - .unit_dependencies + .bcx + .unit_graph .keys() .filter(|unit| !unit.mode.is_run_custom_build()) .collect::>(); @@ -501,10 +508,10 @@ /// Units which depend only on the metadata of others requires the others to /// actually produce metadata, so we'll record that here. fn record_units_requiring_metadata(&mut self) { - for (key, deps) in self.unit_dependencies.iter() { + for (key, deps) in self.bcx.unit_graph.iter() { for dep in deps { if self.only_requires_rmeta(key, &dep.unit) { - self.rmeta_required.insert(dep.unit); + self.rmeta_required.insert(dep.unit.clone()); } } } @@ -512,7 +519,7 @@ /// Returns whether when `parent` depends on `dep` if it only requires the /// metadata file from `dep`. - pub fn only_requires_rmeta(&self, parent: &Unit<'a>, dep: &Unit<'a>) -> bool { + pub fn only_requires_rmeta(&self, parent: &Unit, dep: &Unit) -> bool { // this is only enabled when pipelining is enabled self.pipelining // We're only a candidate for requiring an `rmeta` file if we @@ -527,7 +534,7 @@ /// Returns whether when `unit` is built whether it should emit metadata as /// well because some compilations rely on that. 
- pub fn rmeta_required(&self, unit: &Unit<'a>) -> bool { + pub fn rmeta_required(&self, unit: &Unit) -> bool { self.rmeta_required.contains(unit) || self.bcx.config.cli_unstable().timings.is_some() } diff -Nru cargo-0.44.1/src/cargo/core/compiler/crate_type.rs cargo-0.47.0/src/cargo/core/compiler/crate_type.rs --- cargo-0.44.1/src/cargo/core/compiler/crate_type.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/crate_type.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,108 @@ +use std::fmt; + +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum CrateType { + Bin, + Lib, + Rlib, + Dylib, + Cdylib, + Staticlib, + ProcMacro, + Other(String), +} + +impl CrateType { + pub fn as_str(&self) -> &str { + match self { + CrateType::Bin => "bin", + CrateType::Lib => "lib", + CrateType::Rlib => "rlib", + CrateType::Dylib => "dylib", + CrateType::Cdylib => "cdylib", + CrateType::Staticlib => "staticlib", + CrateType::ProcMacro => "proc-macro", + CrateType::Other(s) => s, + } + } + + pub fn can_lto(&self) -> bool { + match self { + CrateType::Bin | CrateType::Staticlib | CrateType::Cdylib => true, + CrateType::Lib + | CrateType::Rlib + | CrateType::Dylib + | CrateType::ProcMacro + | CrateType::Other(..) => false, + } + } + + pub fn is_linkable(&self) -> bool { + match self { + CrateType::Lib | CrateType::Rlib | CrateType::Dylib | CrateType::ProcMacro => true, + CrateType::Bin | CrateType::Cdylib | CrateType::Staticlib | CrateType::Other(..) => { + false + } + } + } + + pub fn is_dynamic(&self) -> bool { + match self { + CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => true, + CrateType::Lib + | CrateType::Rlib + | CrateType::Bin + | CrateType::Staticlib + | CrateType::Other(..) => false, + } + } + + pub fn requires_upstream_objects(&self) -> bool { + match self { + // "lib" == "rlib" and is a compilation that doesn't actually + // require upstream object files to exist, only upstream metadata + // files. As a result, it doesn't require upstream artifacts + CrateType::Lib | CrateType::Rlib => false, + + // Everything else, however, is some form of "linkable output" or + // something that requires upstream object files. 
+ _ => true, + } + } +} + +impl fmt::Display for CrateType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.as_str().fmt(f) + } +} + +impl<'a> From<&'a String> for CrateType { + fn from(s: &'a String) -> Self { + match s.as_str() { + "bin" => CrateType::Bin, + "lib" => CrateType::Lib, + "rlib" => CrateType::Rlib, + "dylib" => CrateType::Dylib, + "cdylib" => CrateType::Cdylib, + "staticlib" => CrateType::Staticlib, + "procmacro" => CrateType::ProcMacro, + _ => CrateType::Other(s.clone()), + } + } +} + +impl fmt::Debug for CrateType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.to_string().fmt(f) + } +} + +impl serde::Serialize for CrateType { + fn serialize(&self, s: S) -> Result + where + S: serde::ser::Serializer, + { + self.to_string().serialize(s) + } +} diff -Nru cargo-0.44.1/src/cargo/core/compiler/custom_build.rs cargo-0.47.0/src/cargo/core/compiler/custom_build.rs --- cargo-0.44.1/src/cargo/core/compiler/custom_build.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/custom_build.rs 2020-07-17 20:39:39.000000000 +0000 @@ -11,7 +11,9 @@ use std::collections::{BTreeSet, HashSet}; use std::path::{Path, PathBuf}; use std::str; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; + +const CARGO_WARNING: &str = "cargo:warning="; /// Contains the parsed output of a custom build script. #[derive(Clone, Debug, Hash, Default)] @@ -100,7 +102,7 @@ } /// Prepares a `Work` that executes the target as a custom build script. -pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { +pub fn prepare(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult { let _p = profile::start(format!( "build script prepare: {}/{}", unit.pkg, @@ -145,7 +147,7 @@ state.stdout(msg); } -fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { +fn build_work(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult { assert!(unit.mode.is_run_custom_build()); let bcx = &cx.bcx; let dependencies = cx.unit_deps(unit); @@ -175,7 +177,7 @@ // `Profiles::get_profile_run_custom_build` so that those flags get // carried over. 
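As an aside on the new `crate_type.rs` module introduced above: it replaces Cargo's previous stringly-typed handling of crate types with an enum that knows its own linking properties. A rough usage sketch, assuming Cargo is consumed as a library and that `CrateType` is re-exported from `cargo::core::compiler` as the module layout in this patch suggests (an assumption, not a verified public API):

    use cargo::core::compiler::CrateType; // assumed re-export path

    fn main() {
        let ty = CrateType::Cdylib;
        // A cdylib is a final linked artifact: it can be LTO'd and is dynamic,
        // but other Rust crates cannot link against it.
        assert!(ty.can_lto());
        assert!(ty.is_dynamic());
        assert!(!ty.is_linkable());

        // Unrecognized crate-type strings fall back to `CrateType::Other`.
        let other = CrateType::from(&"my-custom-type".to_string());
        assert_eq!(other.as_str(), "my-custom-type");
    }
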
let to_exec = to_exec.into_os_string(); - let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?; + let mut cmd = cx.compilation.host_process(to_exec, &unit.pkg)?; let debug = unit.profile.debuginfo.unwrap_or(0) != 0; cmd.env("OUT_DIR", &script_out_dir) .env("CARGO_MANIFEST_DIR", unit.pkg.root()) @@ -343,9 +345,13 @@ state.running(&cmd); let timestamp = paths::set_invocation_time(&script_run_dir)?; let prefix = format!("[{} {}] ", id.name(), id.version()); + let mut warnings_in_case_of_panic = Vec::new(); let output = cmd .exec_with_streaming( &mut |stdout| { + if stdout.starts_with(CARGO_WARNING) { + warnings_in_case_of_panic.push(stdout[CARGO_WARNING.len()..].to_owned()); + } if extra_verbose { state.stdout(format!("{}{}", prefix, stdout)); } @@ -359,7 +365,19 @@ }, true, ) - .chain_err(|| format!("failed to run custom build command for `{}`", pkg_name))?; + .chain_err(|| format!("failed to run custom build command for `{}`", pkg_name)); + + if let Err(error) = output { + insert_warnings_in_build_outputs( + build_script_outputs, + id, + metadata_hash, + warnings_in_case_of_panic, + ); + return Err(error); + } + + let output = output.unwrap(); // After the build command has finished running, we need to be sure to // remember all of its output so we can later discover precisely what it @@ -369,12 +387,9 @@ // state informing what variables were discovered via our script as // well. paths::write(&output_file, &output.stdout)?; - log::debug!( - "rewinding custom script output mtime {:?} to {}", - output_file, - timestamp - ); - filetime::set_file_times(output_file, timestamp, timestamp)?; + // This mtime shift allows Cargo to detect if a source file was + // modified in the middle of the build. + paths::set_file_time_no_err(output_file, timestamp); paths::write(&err_file, &output.stderr)?; paths::write(&root_output_file, util::path2bytes(&script_out_dir)?)?; let parsed_output = @@ -429,6 +444,22 @@ Ok(job) } +fn insert_warnings_in_build_outputs( + build_script_outputs: Arc>, + id: PackageId, + metadata_hash: Metadata, + warnings: Vec, +) { + let build_output_with_only_warnings = BuildOutput { + warnings, + ..BuildOutput::default() + }; + build_script_outputs + .lock() + .unwrap() + .insert(id, metadata_hash, build_output_with_only_warnings); +} + impl BuildOutput { pub fn parse_file( path: &Path, @@ -583,11 +614,7 @@ } } -fn prepare_metabuild<'a, 'cfg>( - cx: &Context<'a, 'cfg>, - unit: &Unit<'a>, - deps: &[String], -) -> CargoResult<()> { +fn prepare_metabuild(cx: &Context<'_, '_>, unit: &Unit, deps: &[String]) -> CargoResult<()> { let mut output = Vec::new(); let available_deps = cx.unit_deps(unit); // Filter out optional dependencies, and look up the actual lib name. @@ -647,9 +674,9 @@ /// /// The given set of units to this function is the initial set of /// targets/profiles which are being built. -pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> { +pub fn build_map(cx: &mut Context<'_, '_>) -> CargoResult<()> { let mut ret = HashMap::new(); - for unit in units { + for unit in &cx.bcx.roots { build(&mut ret, cx, unit)?; } cx.build_scripts @@ -658,10 +685,10 @@ // Recursive function to build up the map we're constructing. This function // memoizes all of its return values as it goes along. 
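A note on the `CARGO_WARNING` capture added above: lines a build script prints with the `cargo:warning=` prefix are now remembered as they stream by, so they can still be attached to the build output if the script subsequently fails. A standalone sketch of that prefix handling (not Cargo's code):

    const CARGO_WARNING: &str = "cargo:warning=";

    /// Collect the warning payloads out of a build script's stdout.
    fn capture_warnings(script_stdout: &str) -> Vec<String> {
        script_stdout
            .lines()
            .filter_map(|line| line.strip_prefix(CARGO_WARNING))
            .map(str::to_owned)
            .collect()
    }

    fn main() {
        let out = "cargo:rustc-link-lib=z\ncargo:warning=libz not found, using the bundled copy";
        assert_eq!(
            capture_warnings(out),
            vec!["libz not found, using the bundled copy".to_string()]
        );
    }
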
- fn build<'a, 'b, 'cfg>( - out: &'a mut HashMap, BuildScripts>, - cx: &mut Context<'b, 'cfg>, - unit: &Unit<'b>, + fn build<'a>( + out: &'a mut HashMap, + cx: &mut Context<'_, '_>, + unit: &Unit, ) -> CargoResult<&'a BuildScripts> { // Do a quick pre-flight check to see if we've already calculated the // set of dependencies. @@ -672,7 +699,7 @@ // If there is a build script override, pre-fill the build output. if unit.mode.is_run_custom_build() { if let Some(links) = unit.pkg.manifest().links() { - if let Some(output) = cx.bcx.script_override(links, unit.kind) { + if let Some(output) = cx.bcx.target_data.script_override(links, unit.kind) { let metadata = cx.get_run_build_script_metadata(unit); cx.build_script_outputs.lock().unwrap().insert( unit.pkg.package_id(), @@ -688,7 +715,7 @@ // If a package has a build script, add itself as something to inspect for linking. if !unit.target.is_custom_build() && unit.pkg.has_custom_build() { let script_meta = cx - .find_build_script_metadata(*unit) + .find_build_script_metadata(unit.clone()) .expect("has_custom_build should have RunCustomBuild"); add_to_link(&mut ret, unit.pkg.package_id(), script_meta); } @@ -702,7 +729,8 @@ // to rustc invocation caching schemes, so be sure to generate the same // set of build script dependency orderings via sorting the targets that // come out of the `Context`. - let mut dependencies: Vec> = cx.unit_deps(unit).iter().map(|d| d.unit).collect(); + let mut dependencies: Vec = + cx.unit_deps(unit).iter().map(|d| d.unit.clone()).collect(); dependencies.sort_by_key(|u| u.pkg.package_id()); for dep_unit in dependencies.iter() { @@ -710,14 +738,14 @@ if dep_unit.target.for_host() { ret.plugins.extend(dep_scripts.to_link.iter().cloned()); - } else if dep_unit.target.linkable() { + } else if dep_unit.target.is_linkable() { for &(pkg, metadata) in dep_scripts.to_link.iter() { add_to_link(&mut ret, pkg, metadata); } } } - match out.entry(*unit) { + match out.entry(unit.clone()) { Entry::Vacant(entry) => Ok(entry.insert(ret)), Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"), } @@ -731,15 +759,12 @@ } } - fn parse_previous_explicit_deps<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - ) -> CargoResult<()> { + fn parse_previous_explicit_deps(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<()> { let script_run_dir = cx.files().build_script_run_dir(unit); let output_file = script_run_dir.join("output"); let (prev_output, _) = prev_build_output(cx, unit); let deps = BuildDeps::new(&output_file, prev_output.as_ref()); - cx.build_explicit_deps.insert(*unit, deps); + cx.build_explicit_deps.insert(unit.clone(), deps); Ok(()) } } @@ -749,10 +774,7 @@ /// /// Also returns the directory containing the output, typically used later in /// processing. -fn prev_build_output<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, -) -> (Option, PathBuf) { +fn prev_build_output(cx: &mut Context<'_, '_>, unit: &Unit) -> (Option, PathBuf) { let script_out_dir = cx.files().build_script_out_dir(unit); let script_run_dir = cx.files().build_script_run_dir(unit); let root_output_file = script_run_dir.join("root-output"); diff -Nru cargo-0.44.1/src/cargo/core/compiler/fingerprint.rs cargo-0.47.0/src/cargo/core/compiler/fingerprint.rs --- cargo-0.44.1/src/cargo/core/compiler/fingerprint.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/fingerprint.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,23 +5,30 @@ //! (needs to be recompiled) or "fresh" (it does not need to be recompiled). 
//! There are several mechanisms that influence a Unit's freshness: //! -//! - The `Metadata` hash isolates each Unit on the filesystem by being -//! embedded in the filename. If something in the hash changes, then the -//! output files will be missing, and the Unit will be dirty (missing -//! outputs are considered "dirty"). -//! - The `Fingerprint` is another hash, saved to the filesystem in the -//! `.fingerprint` directory, that tracks information about the inputs to a -//! Unit. If any of the inputs changes from the last compilation, then the -//! Unit is considered dirty. A missing fingerprint (such as during the -//! first build) is also considered dirty. -//! - Whether or not input files are actually present. For example a build -//! script which says it depends on a nonexistent file `foo` is always rerun. -//! - Propagation throughout the dependency graph of file modification time -//! information, used to detect changes on the filesystem. Each `Fingerprint` -//! keeps track of what files it'll be processing, and when necessary it will -//! check the `mtime` of each file (last modification time) and compare it to -//! dependencies and output to see if files have been changed or if a change -//! needs to force recompiles of downstream dependencies. +//! - The `Fingerprint` is a hash, saved to the filesystem in the +//! `.fingerprint` directory, that tracks information about the Unit. If the +//! fingerprint is missing (such as the first time the unit is being +//! compiled), then the unit is dirty. If any of the fingerprint fields +//! change (like the name of the source file), then the Unit is considered +//! dirty. +//! +//! The `Fingerprint` also tracks the fingerprints of all its dependencies, +//! so a change in a dependency will propagate the "dirty" status up. +//! +//! - Filesystem mtime tracking is also used to check if a unit is dirty. +//! See the section below on "Mtime comparison" for more details. There +//! are essentially two parts to mtime tracking: +//! +//! 1. The mtime of a Unit's output files is compared to the mtime of all +//! its dependencies' output file mtimes (see `check_filesystem`). If any +//! output is missing, or is older than a dependency's output, then the +//! unit is dirty. +//! 2. The mtime of a Unit's source files is compared to the mtime of its +//! dep-info file in the fingerprint directory (see `find_stale_file`). +//! The dep-info file is used as an anchor to know when the last build of +//! the unit was done. See the "dep-info files" section below for more +//! details. If any input files are missing, or are newer than the +//! dep-info, then the unit is dirty. //! //! Note: Fingerprinting is not a perfect solution. Filesystem mtime tracking //! is notoriously imprecise and problematic. Only a small part of the @@ -33,11 +40,17 @@ //! //! ## Fingerprints and Metadata //! +//! The `Metadata` hash is a hash added to the output filenames to isolate +//! each unit. See the documentation in the `compilation_files` module for +//! more details. NOTE: Not all output files are isolated via filename hashes +//! (like dylibs). The fingerprint directory uses a hash, but sometimes units +//! share the same fingerprint directory (when they don't have Metadata) so +//! care should be taken to handle this! +//! //! Fingerprints and Metadata are similar, and track some of the same things. //! The Metadata contains information that is required to keep Units separate. //! The Fingerprint includes additional information that should cause a -//! 
recompile, but it is desired to reuse the same filenames. Generally the -//! items in the Metadata do not need to be in the Fingerprint. A comparison +//! recompile, but it is desired to reuse the same filenames. A comparison //! of what is tracked: //! //! Value | Fingerprint | Metadata @@ -54,22 +67,33 @@ //! __CARGO_DEFAULT_LIB_METADATA[^4] | | ✓ //! package_id | | ✓ //! authors, description, homepage, repo | ✓ | -//! Target src path | ✓ | -//! Target path relative to ws | ✓ | +//! Target src path relative to ws | ✓ | //! Target flags (test/bench/for_host/edition) | ✓ | //! -C incremental=… flag | ✓ | //! mtime of sources | ✓[^3] | //! RUSTFLAGS/RUSTDOCFLAGS | ✓ | +//! LTO flags | ✓ | +//! config settings[^5] | ✓ | //! is_std | | ✓ //! //! [^1]: Build script and bin dependencies are not included. //! -//! [^3]: The mtime is only tracked for workspace members and path -//! dependencies. Git dependencies track the git revision. +//! [^3]: See below for details on mtime tracking. //! //! [^4]: `__CARGO_DEFAULT_LIB_METADATA` is set by rustbuild to embed the //! release channel (bootstrap/stable/beta/nightly) in libstd. //! +//! [^5]: Config settings that are not otherwise captured anywhere else. +//! Currently, this is only `doc.extern-map`. +//! +//! When deciding what should go in the Metadata vs the Fingerprint, consider +//! that some files (like dylibs) do not have a hash in their filename. Thus, +//! if a value changes, only the fingerprint will detect the change (consider, +//! for example, swapping between different features). Fields that are only in +//! Metadata generally aren't relevant to the fingerprint because they +//! fundamentally change the output (like target vs host changes the directory +//! where it is emitted). +//! //! ## Fingerprint files //! //! Fingerprint information is stored in the @@ -82,13 +106,13 @@ //! used to log details about *why* a fingerprint is considered dirty. //! `CARGO_LOG=cargo::core::compiler::fingerprint=trace cargo build` can be //! used to display this log information. -//! - A "dep-info" file which contains a list of source filenames for the -//! target. This is produced by reading the output of `rustc -//! --emit=dep-info` and packing it into a condensed format. Cargo uses this -//! to check the mtime of every file to see if any of them have changed. +//! - A "dep-info" file which is a translation of rustc's `*.d` dep-info files +//! to a Cargo-specific format that tweaks file names and is optimized for +//! reading quickly. //! - An `invoked.timestamp` file whose filesystem mtime is updated every time -//! the Unit is built. This is an experimental feature used for cleaning -//! unused artifacts. +//! the Unit is built. This is used for capturing the time when the build +//! starts, to detect if files are changed in the middle of the build. See +//! below for more details. //! //! Note that some units are a little different. A Unit for *running* a build //! script or for `rustdoc` does not have a dep-info file (it's not @@ -110,6 +134,106 @@ //! all dependencies, when it is updated, by using `Arc` clones, it //! automatically picks up the updates to its dependencies. //! +//! ### dep-info files +//! +//! Cargo passes the `--emit=dep-info` flag to `rustc` so that `rustc` will +//! generate a "dep info" file (with the `.d` extension). This is a +//! Makefile-like syntax that includes all of the source files used to build +//! the crate. This file is used by Cargo to know which files to check to see +//! if the crate will need to be rebuilt. 
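For reference while reading the dep-info discussion above and below: rustc's `.d` files are Makefile-style rules of the form `output: prereq prereq ...`, with spaces inside paths escaped by a backslash. A standalone sketch of reading the prerequisite list out of one such rule (Cargo's real parser is the rewritten `parse_rustc_dep_info` later in this file, which additionally handles `# env-dep:` comments):

    /// Standalone sketch: pull the prerequisite list out of one Makefile-style
    /// rule emitted by `rustc --emit=dep-info`.
    fn prerequisites(line: &str) -> Vec<String> {
        let pos = match line.find(": ") {
            Some(pos) => pos,
            None => return Vec::new(),
        };
        let mut files = Vec::new();
        let mut parts = line[pos + 2..].split_whitespace();
        while let Some(part) = parts.next() {
            let mut file = part.to_string();
            // A trailing `\` escapes a space inside the path.
            while file.ends_with('\\') {
                file.pop();
                file.push(' ');
                file.push_str(parts.next().unwrap_or(""));
            }
            files.push(file);
        }
        files
    }

    fn main() {
        let line = r"target/debug/deps/libfoo.rmeta: src/lib.rs src/with\ space.rs";
        assert_eq!(
            prerequisites(line),
            vec!["src/lib.rs".to_string(), "src/with space.rs".to_string()]
        );
    }
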
+//! +//! After `rustc` exits successfully, Cargo will read the dep info file and +//! translate it into a binary format that is stored in the fingerprint +//! directory (`translate_dep_info`). The mtime of the fingerprint dep-info +//! file itself is used as the reference for comparing the source files to +//! determine if any of the source files have been modified (see below for +//! more detail). Note that Cargo parses the special `# env-var:...` comments in +//! dep-info files to learn about environment variables that the rustc compile +//! depends on. Cargo then later uses this to trigger a recompile if a +//! referenced env var changes (even if the source didn't change). +//! +//! There is also a third dep-info file. Cargo will extend the file created by +//! rustc with some additional information and saves this into the output +//! directory. This is intended for build system integration. See the +//! `output_depinfo` module for more detail. +//! +//! #### -Zbinary-dep-depinfo +//! +//! `rustc` has an experimental flag `-Zbinary-dep-depinfo`. This causes +//! `rustc` to include binary files (like rlibs) in the dep-info file. This is +//! primarily to support rustc development, so that Cargo can check the +//! implicit dependency to the standard library (which lives in the sysroot). +//! We want Cargo to recompile whenever the standard library rlib/dylibs +//! change, and this is a generic mechanism to make that work. +//! +//! ### Mtime comparison +//! +//! The use of modification timestamps is the most common way a unit will be +//! determined to be dirty or fresh between builds. There are many subtle +//! issues and edge cases with mtime comparisons. This gives a high-level +//! overview, but you'll need to read the code for the gritty details. Mtime +//! handling is different for different unit kinds. The different styles are +//! driven by the `Fingerprint.local` field, which is set based on the unit +//! kind. +//! +//! The status of whether or not the mtime is "stale" or "up-to-date" is +//! stored in `Fingerprint.fs_status`. +//! +//! All units will compare the mtime of its newest output file with the mtimes +//! of the outputs of all its dependencies. If any output file is missing, +//! then the unit is stale. If any dependency is newer, the unit is stale. +//! +//! #### Normal package mtime handling +//! +//! `LocalFingerprint::CheckDepinfo` is used for checking the mtime of +//! packages. It compares the mtime of the input files (the source files) to +//! the mtime of the dep-info file (which is written last after a build is +//! finished). If the dep-info is missing, the unit is stale (it has never +//! been built). The list of input files comes from the dep-info file. See the +//! section above for details on dep-info files. +//! +//! Also note that although registry and git packages use `CheckDepInfo`, none +//! of their source files are included in the dep-info (see +//! `translate_dep_info`), so for those kinds no mtime checking is done +//! (unless `-Zbinary-dep-depinfo` is used). Repository and git packages are +//! static, so there is no need to check anything. +//! +//! When a build is complete, the mtime of the dep-info file in the +//! fingerprint directory is modified to rewind it to the time when the build +//! started. This is done by creating an `invoked.timestamp` file when the +//! build starts to capture the start time. The mtime is rewound to the start +//! to handle the case where the user modifies a source file while a build is +//! running. 
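Before the docs continue: the mtime-rewind trick described here is easy to picture with the `filetime` crate, which Cargo already depends on. The sketch below only illustrates the idea; the real code goes through `paths::set_file_time_no_err` and the `invoked.timestamp` file, and the file path used here is hypothetical:

    use std::fs;
    use std::time::SystemTime;

    use filetime::FileTime; // external crate: filetime = "0.2"

    fn main() -> std::io::Result<()> {
        // Capture the invocation time before any work starts.
        let invoked = FileTime::from_system_time(SystemTime::now());

        // ... the build runs here; the user may edit a source file meanwhile ...

        // Pretend this is the fingerprint dep-info file written after the build.
        let dep_info = "example-dep-info"; // hypothetical path, for illustration
        fs::write(dep_info, b"contents")?;

        // Rewind its mtime to the invocation time, so a file edited mid-build
        // still compares as "newer" on the next run.
        filetime::set_file_times(dep_info, invoked, invoked)?;
        Ok(())
    }
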
Cargo can't know whether or not the file was included in the +//! build, so it takes a conservative approach of assuming the file was *not* +//! included, and it should be rebuilt during the next build. +//! +//! #### Rustdoc mtime handling +//! +//! Rustdoc does not emit a dep-info file, so Cargo currently has a relatively +//! simple system for detecting rebuilds. `LocalFingerprint::Precalculated` is +//! used for rustdoc units. For registry packages, this is the package +//! version. For git packages, it is the git hash. For path packages, it is +//! the a string of the mtime of the newest file in the package. +//! +//! There are some known bugs with how this works, so it should be improved at +//! some point. +//! +//! #### Build script mtime handling +//! +//! Build script mtime handling runs in different modes. There is the "old +//! style" where the build script does not emit any `rerun-if` directives. In +//! this mode, Cargo will use `LocalFingerprint::Precalculated`. See the +//! "rustdoc" section above how it works. +//! +//! In the new-style, each `rerun-if` directive is translated to the +//! corresponding `LocalFingerprint` variant. The `RerunIfChanged` variant +//! compares the mtime of the given filenames against the mtime of the +//! "output" file. +//! +//! Similar to normal units, the build script "output" file mtime is rewound +//! to the time just before the build script is executed to handle mid-build +//! modifications. +//! //! ## Considerations for inclusion in a fingerprint //! //! Over time we've realized a few items which historically were included in @@ -192,6 +316,7 @@ use std::env; use std::hash::{self, Hasher}; use std::path::{Path, PathBuf}; +use std::str; use std::sync::{Arc, Mutex}; use std::time::SystemTime; @@ -202,12 +327,13 @@ use serde::ser; use serde::{Deserialize, Serialize}; -use crate::core::compiler::unit_dependencies::UnitDep; -use crate::core::{InternedString, Package}; +use crate::core::compiler::unit_graph::UnitDep; +use crate::core::Package; use crate::util; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::paths; -use crate::util::{internal, profile}; +use crate::util::{internal, profile, ProcessBuilder}; use super::custom_build::BuildDeps; use super::job::{ @@ -230,19 +356,14 @@ /// transitively propagate throughout the dependency graph, it only forces this /// one unit which is very unlikely to be what you want unless you're /// exclusively talking about top-level units. -pub fn prepare_target<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - force: bool, -) -> CargoResult { +pub fn prepare_target(cx: &mut Context<'_, '_>, unit: &Unit, force: bool) -> CargoResult { let _p = profile::start(format!( "fingerprint: {} / {}", unit.pkg.package_id(), unit.target.name() )); let bcx = cx.bcx; - let new = cx.files().fingerprint_dir(unit); - let loc = new.join(&filename(cx, unit)); + let loc = cx.files().fingerprint_file_path(unit, ""); debug!("fingerprint at: {}", loc.display()); @@ -277,6 +398,40 @@ return Ok(Job::new(Work::noop(), Fresh)); } + // Clear out the old fingerprint file if it exists. This protects when + // compilation is interrupted leaving a corrupt file. For example, a + // project with a lib.rs and integration test (two units): + // + // 1. Build the library and integration test. + // 2. Make a change to lib.rs (NOT the integration test). + // 3. Build the integration test, hit Ctrl-C while linking. 
With gcc, this + // will leave behind an incomplete executable (zero size, or partially + // written). NOTE: The library builds successfully, it is the linking + // of the integration test that we are interrupting. + // 4. Build the integration test again. + // + // Without the following line, then step 3 will leave a valid fingerprint + // on the disk. Then step 4 will think the integration test is "fresh" + // because: + // + // - There is a valid fingerprint hash on disk (written in step 1). + // - The mtime of the output file (the corrupt integration executable + // written in step 3) is newer than all of its dependencies. + // - The mtime of the integration test fingerprint dep-info file (written + // in step 1) is newer than the integration test's source files, because + // we haven't modified any of its source files. + // + // But the executable is corrupt and needs to be rebuilt. Clearing the + // fingerprint at step 3 ensures that Cargo never mistakes a partially + // written output as up-to-date. + if loc.exists() { + // Truncate instead of delete so that compare_old_fingerprint will + // still log the reason for the fingerprint failure instead of just + // reporting "failed to read fingerprint" during the next build if + // this build fails. + paths::write(&loc, b"")?; + } + let write_fingerprint = if unit.mode.is_run_custom_build() { // For build scripts the `local` field of the fingerprint may change // while we're executing it. For example it could be in the legacy @@ -388,6 +543,8 @@ /// "description", which are exposed as environment variables during /// compilation. metadata: u64, + /// Hash of various config settings that change how things are compiled. + config: u64, /// Description of whether the filesystem status for this unit is up to date /// or should be considered stale. #[serde(skip)] @@ -484,9 +641,8 @@ #[derive(Debug, Serialize, Deserialize, Hash)] enum LocalFingerprint { /// This is a precalculated fingerprint which has an opaque string we just - /// hash as usual. This variant is primarily used for git/crates.io - /// dependencies where the source never changes so we can quickly conclude - /// that there's some string we can hash and it won't really change much. + /// hash as usual. This variant is primarily used for rustdoc where we + /// don't have a dep-info file to compare against. /// /// This is also used for build scripts with no `rerun-if-*` statements, but /// that's overall a mistake and causes bugs in Cargo. We shouldn't use this @@ -524,40 +680,64 @@ RerunIfEnvChanged { var: String, val: Option }, } -enum StaleFile { - Missing(PathBuf), - Changed { +enum StaleItem { + MissingFile(PathBuf), + ChangedFile { reference: PathBuf, reference_mtime: FileTime, stale: PathBuf, stale_mtime: FileTime, }, + ChangedEnv { + var: String, + previous: Option, + current: Option, + }, } impl LocalFingerprint { /// Checks dynamically at runtime if this `LocalFingerprint` has a stale - /// file. + /// item inside of it. + /// + /// The main purpose of this function is to handle two different ways + /// fingerprints can be invalidated: /// - /// This will use the absolute root paths passed in if necessary to guide - /// file accesses. - fn find_stale_file( + /// * One is a dependency listed in rustc's dep-info files is invalid. Note + /// that these could either be env vars or files. We check both here. + /// + /// * Another is the `rerun-if-changed` directive from build scripts. 
This + /// is where we'll find whether files have actually changed + fn find_stale_item( &self, mtime_cache: &mut HashMap, pkg_root: &Path, target_root: &Path, - ) -> CargoResult> { + ) -> CargoResult> { match self { - // We need to parse `dep_info`, learn about all the files the crate - // depends on, and then see if any of them are newer than the - // dep_info file itself. If the `dep_info` file is missing then this - // unit has never been compiled! + // We need to parse `dep_info`, learn about the crate's dependencies. + // + // For each env var we see if our current process's env var still + // matches, and for each file we see if any of them are newer than + // the `dep_info` file itself whose mtime represents the start of + // rustc. LocalFingerprint::CheckDepInfo { dep_info } => { let dep_info = target_root.join(dep_info); - if let Some(paths) = parse_dep_info(pkg_root, target_root, &dep_info)? { - Ok(find_stale_file(mtime_cache, &dep_info, paths.iter())) - } else { - Ok(Some(StaleFile::Missing(dep_info))) + let info = match parse_dep_info(pkg_root, target_root, &dep_info)? { + Some(info) => info, + None => return Ok(Some(StaleItem::MissingFile(dep_info))), + }; + for (key, previous) in info.env.iter() { + let current = env::var(key).ok(); + if current == *previous { + continue; + } + return Ok(Some(StaleItem::ChangedEnv { + var: key.clone(), + previous: previous.clone(), + current, + })); } + Ok(find_stale_file(mtime_cache, &dep_info, info.files.iter())) } // We need to verify that no paths listed in `paths` are newer than @@ -602,6 +782,7 @@ memoized_hash: Mutex::new(None), rustflags: Vec::new(), metadata: 0, + config: 0, fs_status: FsStatus::Stale, outputs: Vec::new(), } @@ -662,6 +843,9 @@ if self.metadata != old.metadata { bail!("metadata changed") } + if self.config != old.config { + bail!("configuration settings have changed") + } let my_local = self.local.lock().unwrap(); let old_local = old.local.lock().unwrap(); if my_local.len() != old_local.len() { @@ -740,7 +924,7 @@ if a.name != b.name { let e = format_err!("`{}` != `{}`", a.name, b.name) .context("unit dependency name changed"); - return Err(e.into()); + return Err(e); } if a.fingerprint.hash() != b.fingerprint.hash() { @@ -752,7 +936,7 @@ b.fingerprint.hash() ) .context("unit dependency information changed"); - return Err(e.into()); + return Err(e); } } @@ -833,10 +1017,9 @@ let (dep_path, dep_mtime) = if dep.only_requires_rmeta { dep_mtimes .iter() - .filter(|(path, _mtime)| { + .find(|(path, _mtime)| { path.extension().and_then(|s| s.to_str()) == Some("rmeta") }) - .next() .expect("failed to find rmeta") } else { match dep_mtimes.iter().max_by_key(|kv| kv.1) { @@ -872,8 +1055,8 @@ // files for this package itself. If we do find something log a helpful // message and bail out so we stay stale. for local in self.local.get_mut().unwrap().iter() { - if let Some(file) = local.find_stale_file(mtime_cache, pkg_root, target_root)? { - file.log(); + if let Some(item) = local.find_stale_item(mtime_cache, pkg_root, target_root)? { + item.log(); return Ok(()); } } @@ -897,12 +1080,13 @@ ref deps, ref local, metadata, + config, ref rustflags, .. 
} = *self; let local = local.lock().unwrap(); ( - rustc, features, target, path, profile, &*local, metadata, rustflags, + rustc, features, target, path, profile, &*local, metadata, config, rustflags, ) .hash(h); @@ -956,11 +1140,7 @@ } impl DepFingerprint { - fn new<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - parent: &Unit<'a>, - dep: &UnitDep<'a>, - ) -> CargoResult { + fn new(cx: &mut Context<'_, '_>, parent: &Unit, dep: &UnitDep) -> CargoResult { let fingerprint = calculate(cx, &dep.unit)?; // We need to be careful about what we hash here. We have a goal of // supporting renaming a project directory and not rebuilding @@ -988,7 +1168,7 @@ } } -impl StaleFile { +impl StaleItem { /// Use the `log` crate to log a hopefully helpful message in diagnosing /// what file is considered stale and why. This is intended to be used in /// conjunction with `CARGO_LOG` to determine why Cargo is recompiling @@ -996,10 +1176,10 @@ /// that. fn log(&self) { match self { - StaleFile::Missing(path) => { + StaleItem::MissingFile(path) => { info!("stale: missing {:?}", path); } - StaleFile::Changed { + StaleItem::ChangedFile { reference, reference_mtime, stale, @@ -1009,6 +1189,14 @@ info!(" (vs) {:?}", reference); info!(" {:?} != {:?}", reference_mtime, stale_mtime); } + StaleItem::ChangedEnv { + var, + previous, + current, + } => { + info!("stale: changed env {:?}", var); + info!(" {:?} != {:?}", previous, current); + } } } } @@ -1026,10 +1214,7 @@ /// /// Information like file modification time is only calculated for path /// dependencies. -fn calculate<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, -) -> CargoResult> { +fn calculate(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult> { // This function is slammed quite a lot, so the result is memoized. if let Some(s) = cx.fingerprints.get(unit) { return Ok(Arc::clone(s)); @@ -1048,16 +1233,14 @@ fingerprint.check_filesystem(&mut cx.mtime_cache, unit.pkg.root(), &target_root)?; let fingerprint = Arc::new(fingerprint); - cx.fingerprints.insert(*unit, Arc::clone(&fingerprint)); + cx.fingerprints + .insert(unit.clone(), Arc::clone(&fingerprint)); Ok(fingerprint) } /// Calculate a fingerprint for a "normal" unit, or anything that's not a build /// script. This is an internal helper of `calculate`, don't call directly. -fn calculate_normal<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, -) -> CargoResult { +fn calculate_normal(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult { // Recursively calculate the fingerprint for all of our dependencies. // // Skip fingerprints of binaries because they don't actually induce a @@ -1073,19 +1256,21 @@ .collect::>>()?; deps.sort_by(|a, b| a.pkg_id.cmp(&b.pkg_id)); - // Afterwards calculate our own fingerprint information. We specially - // handle `path` packages to ensure we track files on the filesystem - // correctly, but otherwise upstream packages like from crates.io or git - // get bland fingerprints because they don't change without their - // `PackageId` changing. + // Afterwards calculate our own fingerprint information. let target_root = target_root(cx); - let local = if use_dep_info(unit) { + let local = if unit.mode.is_doc() { + // rustdoc does not have dep-info files. 
+ let fingerprint = pkg_fingerprint(cx.bcx, &unit.pkg).chain_err(|| { + format!( + "failed to determine package fingerprint for documenting {}", + unit.pkg + ) + })?; + vec![LocalFingerprint::Precalculated(fingerprint)] + } else { let dep_info = dep_info_loc(cx, unit); let dep_info = dep_info.strip_prefix(&target_root).unwrap().to_path_buf(); vec![LocalFingerprint::CheckDepInfo { dep_info }] - } else { - let fingerprint = pkg_fingerprint(cx.bcx, unit.pkg)?; - vec![LocalFingerprint::Precalculated(fingerprint)] }; // Figure out what the outputs of our unit is, and we'll be storing them @@ -1093,34 +1278,37 @@ let outputs = cx .outputs(unit)? .iter() - .filter(|output| output.flavor != FileFlavor::DebugInfo) + .filter(|output| !matches!(output.flavor, FileFlavor::DebugInfo | FileFlavor::Auxiliary)) .map(|output| output.path.clone()) .collect(); // Fill out a bunch more information that we'll be tracking typically // hashed to take up less space on disk as we just need to know when things // change. - let mut extra_flags = if unit.mode.is_doc() { + let extra_flags = if unit.mode.is_doc() { cx.bcx.rustdocflags_args(unit) } else { cx.bcx.rustflags_args(unit) } .to_vec(); - if cx.is_primary_package(unit) { - // This is primarily here for clippy arguments. - if let Some(proc) = &cx.bcx.build_config.primary_unit_rustc { - let args = proc - .get_args() - .iter() - .map(|s| s.to_string_lossy().to_string()); - extra_flags.extend(args); - } - } - let profile_hash = util::hash_u64((&unit.profile, unit.mode, cx.bcx.extra_args_for(unit))); + let profile_hash = util::hash_u64(( + &unit.profile, + unit.mode, + cx.bcx.extra_args_for(unit), + cx.lto[unit], + )); // Include metadata since it is exposed as environment variables. let m = unit.pkg.manifest().metadata(); let metadata = util::hash_u64((&m.authors, &m.description, &m.homepage, &m.repository)); + let config = if unit.mode.is_doc() && cx.bcx.config.cli_unstable().rustdoc_map { + cx.bcx + .config + .doc_extern_map() + .map_or(0, |map| util::hash_u64(map)) + } else { + 0 + }; Ok(Fingerprint { rustc: util::hash_u64(&cx.bcx.rustc().verbose_version), target: util::hash_u64(&unit.target), @@ -1133,24 +1321,16 @@ local: Mutex::new(local), memoized_hash: Mutex::new(None), metadata, + config, rustflags: extra_flags, fs_status: FsStatus::Stale, outputs, }) } -/// Whether or not the fingerprint should track the dependencies from the -/// dep-info file for this unit. -fn use_dep_info(unit: &Unit<'_>) -> bool { - !unit.mode.is_doc() -} - /// Calculate a fingerprint for an "execute a build script" unit. This is an /// internal helper of `calculate`, don't call directly. -fn calculate_run_custom_build<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, -) -> CargoResult { +fn calculate_run_custom_build(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult { assert!(unit.mode.is_run_custom_build()); // Using the `BuildDeps` information we'll have previously parsed and // inserted into `build_explicit_deps` built an initial snapshot of the @@ -1160,7 +1340,18 @@ // the whole crate. let (gen_local, overridden) = build_script_local_fingerprints(cx, unit); let deps = &cx.build_explicit_deps[unit]; - let local = (gen_local)(deps, Some(&|| pkg_fingerprint(cx.bcx, unit.pkg)))?.unwrap(); + let local = (gen_local)( + deps, + Some(&|| { + pkg_fingerprint(cx.bcx, &unit.pkg).chain_err(|| { + format!( + "failed to determine package fingerprint for build script for {}", + unit.pkg + ) + }) + }), + )? 
+ .unwrap(); let output = deps.build_script_output.clone(); // Include any dependencies of our execution, which is typically just the @@ -1225,9 +1416,9 @@ /// improve please do so! /// /// FIXME(#6779) - see all the words above -fn build_script_local_fingerprints<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, +fn build_script_local_fingerprints( + cx: &mut Context<'_, '_>, + unit: &Unit, ) -> ( Box< dyn FnOnce( @@ -1300,9 +1491,9 @@ /// Create a `LocalFingerprint` for an overridden build script. /// Returns None if it is not overridden. -fn build_script_override_fingerprint<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, +fn build_script_override_fingerprint( + cx: &mut Context<'_, '_>, + unit: &Unit, ) -> Option { // Build script output is only populated at this stage when it is // overridden. @@ -1375,7 +1566,7 @@ } /// Prepare for work when a package starts to build -pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<()> { +pub fn prepare_init(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<()> { let new1 = cx.files().fingerprint_dir(unit); // Doc tests have no output, thus no fingerprint. @@ -1388,10 +1579,8 @@ /// Returns the location that the dep-info file will show up at for the `unit` /// specified. -pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf { - cx.files() - .fingerprint_dir(unit) - .join(&format!("dep-{}", filename(cx, unit))) +pub fn dep_info_loc(cx: &mut Context<'_, '_>, unit: &Unit) -> PathBuf { + cx.files().fingerprint_file_path(unit, "dep-") } /// Returns an absolute path that target directory. @@ -1411,7 +1600,7 @@ // update the mtime so other cleaners know we used it let t = FileTime::from_system_time(SystemTime::now()); debug!("mtime-on-use forcing {:?} to {}", loc, t); - filetime::set_file_times(loc, t, t)?; + paths::set_file_time_no_err(loc, t); } let new_hash = new_fingerprint.hash(); @@ -1423,13 +1612,16 @@ let old_fingerprint_json = paths::read(&loc.with_extension("json"))?; let old_fingerprint: Fingerprint = serde_json::from_str(&old_fingerprint_json) .chain_err(|| internal("failed to deserialize json"))?; - debug_assert_eq!(util::to_hex(old_fingerprint.hash()), old_fingerprint_short); + // Fingerprint can be empty after a failed rebuild (see comment in prepare_target). + if !old_fingerprint_short.is_empty() { + debug_assert_eq!(util::to_hex(old_fingerprint.hash()), old_fingerprint_short); + } let result = new_fingerprint.compare(&old_fingerprint); assert!(result.is_err()); result } -fn log_compare(unit: &Unit<'_>, compare: &CargoResult<()>) { +fn log_compare(unit: &Unit, compare: &CargoResult<()>) { let ce = match compare { Ok(..) => return, Err(e) => e, @@ -1441,33 +1633,44 @@ info!(" err: {:?}", ce); } -// Parse the dep-info into a list of paths +/// Parses Cargo's internal `EncodedDepInfo` structure that was previously +/// serialized to disk. +/// +/// Note that this is not rustc's `*.d` files. +/// +/// Also note that rustc's `*.d` files are translated to Cargo-specific +/// `EncodedDepInfo` files after compilations have finished in +/// `translate_dep_info`. +/// +/// Returns `None` if the file is corrupt or couldn't be read from disk. This +/// indicates that the crate should likely be rebuilt. 
pub fn parse_dep_info( pkg_root: &Path, target_root: &Path, dep_info: &Path, -) -> CargoResult>> { +) -> CargoResult> { let data = match paths::read_bytes(dep_info) { Ok(data) => data, Err(_) => return Ok(None), }; - let paths = data - .split(|&x| x == 0) - .filter(|x| !x.is_empty()) - .map(|p| { - let ty = match DepInfoPathType::from_byte(p[0]) { - Some(ty) => ty, - None => return Err(internal("dep-info invalid")), - }; - let path = util::bytes2path(&p[1..])?; - match ty { - DepInfoPathType::PackageRootRelative => Ok(pkg_root.join(path)), - // N.B. path might be absolute here in which case the join will have no effect - DepInfoPathType::TargetRootRelative => Ok(target_root.join(path)), - } - }) - .collect::, _>>()?; - Ok(Some(paths)) + let info = match EncodedDepInfo::parse(&data) { + Some(info) => info, + None => { + log::warn!("failed to parse cargo's dep-info at {:?}", dep_info); + return Ok(None); + } + }; + let mut ret = RustcDepInfo::default(); + ret.env = info.env; + for (ty, path) in info.files { + let path = match ty { + DepInfoPathType::PackageRootRelative => pkg_root.join(path), + // N.B. path might be absolute here in which case the join will have no effect + DepInfoPathType::TargetRootRelative => target_root.join(path), + }; + ret.files.push(path); + } + Ok(Some(ret)) } fn pkg_fingerprint(bcx: &BuildContext<'_, '_>, pkg: &Package) -> CargoResult { @@ -1484,14 +1687,14 @@ mtime_cache: &mut HashMap, reference: &Path, paths: I, -) -> Option +) -> Option where I: IntoIterator, I::Item: AsRef, { let reference_mtime = match paths::mtime(reference) { Ok(mtime) => mtime, - Err(..) => return Some(StaleFile::Missing(reference.to_path_buf())), + Err(..) => return Some(StaleItem::MissingFile(reference.to_path_buf())), }; for path in paths { @@ -1501,7 +1704,7 @@ Entry::Vacant(v) => { let mtime = match paths::mtime(path) { Ok(mtime) => mtime, - Err(..) => return Some(StaleFile::Missing(path.to_path_buf())), + Err(..) => return Some(StaleItem::MissingFile(path.to_path_buf())), }; *v.insert(mtime) } @@ -1529,7 +1732,7 @@ continue; } - return Some(StaleFile::Changed { + return Some(StaleItem::ChangedFile { reference: reference.to_path_buf(), reference_mtime, stale: path.to_path_buf(), @@ -1544,41 +1747,12 @@ None } -fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String { - // file_stem includes metadata hash. Thus we have a different - // fingerprint for every metadata hash version. This works because - // even if the package is fresh, we'll still link the fresh target - let file_stem = cx.files().file_stem(unit); - let kind = unit.target.kind().description(); - let flavor = if unit.mode.is_any_test() { - "test-" - } else if unit.mode.is_doc() { - "doc-" - } else if unit.mode.is_run_custom_build() { - "run-" - } else { - "" - }; - format!("{}{}-{}", flavor, kind, file_stem) -} - -#[repr(u8)] enum DepInfoPathType { // src/, e.g. src/lib.rs - PackageRootRelative = 1, + PackageRootRelative, // target/debug/deps/lib... // or an absolute path /.../sysroot/... - TargetRootRelative = 2, -} - -impl DepInfoPathType { - fn from_byte(b: u8) -> Option { - match b { - 1 => Some(DepInfoPathType::PackageRootRelative), - 2 => Some(DepInfoPathType::TargetRootRelative), - _ => None, - } - } + TargetRootRelative, } /// Parses the dep-info file coming out of rustc into a Cargo-specific format. @@ -1599,7 +1773,8 @@ /// included. 
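Most of the machinery above reduces to one comparison per unit: is any input file missing, or newer than the Cargo-side dep-info file whose mtime marks the start of the last successful build? A standalone sketch of that scan, without the mtime cache, logging, or `StaleItem` bookkeeping of `find_stale_file`:

    use std::fs;
    use std::path::{Path, PathBuf};

    use filetime::FileTime; // external crate: filetime = "0.2"

    /// Returns the first input that is missing or newer than `reference`
    /// (the dep-info file), i.e. the reason the unit would be considered dirty.
    fn first_stale(reference: &Path, inputs: &[PathBuf]) -> Option<PathBuf> {
        let reference_mtime = match fs::metadata(reference) {
            Ok(meta) => FileTime::from_last_modification_time(&meta),
            // No dep-info at all: the unit has never been built.
            Err(_) => return Some(reference.to_path_buf()),
        };
        for input in inputs {
            match fs::metadata(input) {
                Ok(meta) if FileTime::from_last_modification_time(&meta) <= reference_mtime => {}
                // Missing, unreadable, or newer than the last build: stale.
                _ => return Some(input.clone()),
            }
        }
        None
    }

    fn main() {
        // Hypothetical paths, purely for illustration.
        let inputs = vec![PathBuf::from("src/lib.rs"), PathBuf::from("src/util.rs")];
        let stale = first_stale(Path::new("target/.fingerprint/foo/dep-lib-foo"), &inputs);
        println!("first stale input: {:?}", stale);
    }
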
If it is false, then package-relative paths are skipped and /// ignored (typically used for registry or git dependencies where we assume /// the source never changes, and we don't want the cost of running `stat` on -/// all those files). +/// all those files). See the module-level docs for the note about +/// `-Zbinary-dep-depinfo` for more details on why this is done. /// /// The serialized Cargo format will contain a list of files, all of which are /// relative if they're under `root`. or absolute if they're elsewhere. @@ -1609,18 +1784,45 @@ rustc_cwd: &Path, pkg_root: &Path, target_root: &Path, + rustc_cmd: &ProcessBuilder, allow_package: bool, ) -> CargoResult<()> { - let target = parse_rustc_dep_info(rustc_dep_info)?; - let deps = &target - .get(0) - .ok_or_else(|| internal("malformed dep-info format, no targets".to_string()))? - .1; + let depinfo = parse_rustc_dep_info(rustc_dep_info)?; let target_root = target_root.canonicalize()?; let pkg_root = pkg_root.canonicalize()?; - let mut new_contents = Vec::new(); - for file in deps { + let mut on_disk_info = EncodedDepInfo::default(); + on_disk_info.env = depinfo.env; + + // This is a bit of a tricky statement, but here we're *removing* the + // dependency on environment variables that were defined specifically for + // the command itself. Environment variables returend by `get_envs` includes + // environment variables like: + // + // * `OUT_DIR` if applicable + // * env vars added by a build script, if any + // + // The general idea here is that the dep info file tells us what, when + // changed, should cause us to rebuild the crate. These environment + // variables are synthesized by Cargo and/or the build script, and the + // intention is that their values are tracked elsewhere for whether the + // crate needs to be rebuilt. + // + // For example a build script says when it needs to be rerun and otherwise + // it's assumed to produce the same output, so we're guaranteed that env + // vars defined by the build script will always be the same unless the build + // script itself reruns, in which case the crate will rerun anyway. + // + // For things like `OUT_DIR` it's a bit sketchy for now. Most of the time + // that's used for code generation but this is technically buggy where if + // you write a binary that does `println!("{}", env!("OUT_DIR"))` we won't + // recompile that if you move the target directory. Hopefully that's not too + // bad of an issue for now... + on_disk_info + .env + .retain(|(key, _)| !rustc_cmd.get_envs().contains_key(key)); + + for file in depinfo.files { // The path may be absolute or relative, canonical or not. Make sure // it is canonicalized so we are comparing the same kinds of paths. let abs_file = rustc_cwd.join(file); @@ -1642,28 +1844,157 @@ // effect. (DepInfoPathType::TargetRootRelative, &*abs_file) }; - new_contents.push(ty as u8); - new_contents.extend(util::path2bytes(path)?); - new_contents.push(0); + on_disk_info.files.push((ty, path.to_owned())); } - paths::write(cargo_dep_info, &new_contents)?; + paths::write(cargo_dep_info, on_disk_info.serialize()?)?; Ok(()) } +#[derive(Default)] +pub struct RustcDepInfo { + /// The list of files that the main target in the dep-info file depends on. + pub files: Vec, + /// The list of environment variables we found that the rustc compilation + /// depends on. + /// + /// The first element of the pair is the name of the env var and the second + /// item is the value. 
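The `retain` above drops env vars that Cargo itself put on the rustc command line (`OUT_DIR`, values emitted by a build script), since those are accounted for elsewhere in the fingerprint; only externally-controlled env vars should trigger a rebuild through the dep-info path. A rough standalone sketch of that filtering, where `cmd_envs` stands in for `ProcessBuilder::get_envs` (a stand-in type, not the real signature):

    use std::collections::HashMap;

    /// Keep only the env-var dependencies that were *not* set by Cargo on the
    /// rustc command itself.
    fn retain_external_env_deps(
        env_deps: &mut Vec<(String, Option<String>)>,
        cmd_envs: &HashMap<String, Option<String>>,
    ) {
        env_deps.retain(|(key, _)| !cmd_envs.contains_key(key));
    }

    fn main() {
        let mut env_deps = vec![
            ("OUT_DIR".to_string(), Some("/tmp/out".to_string())),
            ("SOME_EXTERNAL_CONFIG".to_string(), None),
        ];
        let mut cmd_envs = HashMap::new();
        cmd_envs.insert("OUT_DIR".to_string(), Some("/tmp/out".to_string()));
        retain_external_env_deps(&mut env_deps, &cmd_envs);
        assert_eq!(env_deps.len(), 1);
        assert_eq!(env_deps[0].0, "SOME_EXTERNAL_CONFIG");
    }
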
`Some` means that the env var was set, and `None` + /// means that the env var wasn't actually set and the compilation depends + /// on it not being set. + pub env: Vec<(String, Option)>, +} + +// Same as `RustcDepInfo` except avoids absolute paths as much as possible to +// allow moving around the target directory. +// +// This is also stored in an optimized format to make parsing it fast because +// Cargo will read it for crates on all future compilations. +#[derive(Default)] +struct EncodedDepInfo { + files: Vec<(DepInfoPathType, PathBuf)>, + env: Vec<(String, Option)>, +} + +impl EncodedDepInfo { + fn parse(mut bytes: &[u8]) -> Option { + let bytes = &mut bytes; + let nfiles = read_usize(bytes)?; + let mut files = Vec::with_capacity(nfiles as usize); + for _ in 0..nfiles { + let ty = match read_u8(bytes)? { + 0 => DepInfoPathType::PackageRootRelative, + 1 => DepInfoPathType::TargetRootRelative, + _ => return None, + }; + let bytes = read_bytes(bytes)?; + files.push((ty, util::bytes2path(bytes).ok()?)); + } + + let nenv = read_usize(bytes)?; + let mut env = Vec::with_capacity(nenv as usize); + for _ in 0..nenv { + let key = str::from_utf8(read_bytes(bytes)?).ok()?.to_string(); + let val = match read_u8(bytes)? { + 0 => None, + 1 => Some(str::from_utf8(read_bytes(bytes)?).ok()?.to_string()), + _ => return None, + }; + env.push((key, val)); + } + return Some(EncodedDepInfo { files, env }); + + fn read_usize(bytes: &mut &[u8]) -> Option { + let ret = bytes.get(..4)?; + *bytes = &bytes[4..]; + Some( + ((ret[0] as usize) << 0) + | ((ret[1] as usize) << 8) + | ((ret[2] as usize) << 16) + | ((ret[3] as usize) << 24), + ) + } + + fn read_u8(bytes: &mut &[u8]) -> Option { + let ret = *bytes.get(0)?; + *bytes = &bytes[1..]; + Some(ret) + } + + fn read_bytes<'a>(bytes: &mut &'a [u8]) -> Option<&'a [u8]> { + let n = read_usize(bytes)? as usize; + let ret = bytes.get(..n)?; + *bytes = &bytes[n..]; + Some(ret) + } + } + + fn serialize(&self) -> CargoResult> { + let mut ret = Vec::new(); + let dst = &mut ret; + write_usize(dst, self.files.len()); + for (ty, file) in self.files.iter() { + match ty { + DepInfoPathType::PackageRootRelative => dst.push(0), + DepInfoPathType::TargetRootRelative => dst.push(1), + } + write_bytes(dst, util::path2bytes(file)?); + } + + write_usize(dst, self.env.len()); + for (key, val) in self.env.iter() { + write_bytes(dst, key); + match val { + None => dst.push(0), + Some(val) => { + dst.push(1); + write_bytes(dst, val); + } + } + } + return Ok(ret); + + fn write_bytes(dst: &mut Vec, val: impl AsRef<[u8]>) { + let val = val.as_ref(); + write_usize(dst, val.len()); + dst.extend_from_slice(val); + } + + fn write_usize(dst: &mut Vec, val: usize) { + dst.push(val as u8); + dst.push((val >> 8) as u8); + dst.push((val >> 16) as u8); + dst.push((val >> 24) as u8); + } + } +} + /// Parse the `.d` dep-info file generated by rustc. -/// -/// Result is a Vec of `(target, prerequisites)` tuples where `target` is the -/// rule name, and `prerequisites` is a list of files that it depends on. 
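One detail worth calling out before the rewritten parser below: rustc keeps each `# env-dep:NAME=VALUE` comment on a single line by escaping `\n`, `\r`, and `\\` in names and values. A standalone sketch of that escaping (the inverse of the `unescape_env` helper in the new `parse_rustc_dep_info`), hedged as an illustration of the scheme rather than rustc's exact code:

    /// Single-line escaping for `# env-dep:` entries: a backslash becomes "\\",
    /// a newline becomes "\n", and a carriage return becomes "\r".
    fn escape_env(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '\\' => out.push_str("\\\\"),
                '\n' => out.push_str("\\n"),
                '\r' => out.push_str("\\r"),
                c => out.push(c),
            }
        }
        out
    }

    fn main() {
        assert_eq!(escape_env("multi\nline"), r"multi\nline");
        assert_eq!(escape_env(r"C:\path"), r"C:\\path");
    }
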
-pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult)>> { +pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult { let contents = paths::read(rustc_dep_info)?; - contents - .lines() - .filter_map(|l| l.find(": ").map(|i| (l, i))) - .map(|(line, pos)| { - let target = &line[..pos]; + let mut ret = RustcDepInfo::default(); + let mut found_deps = false; + + for line in contents.lines() { + let env_dep_prefix = "# env-dep:"; + if line.starts_with(env_dep_prefix) { + let rest = &line[env_dep_prefix.len()..]; + let mut parts = rest.splitn(2, '='); + let env_var = match parts.next() { + Some(s) => s, + None => continue, + }; + let env_val = match parts.next() { + Some(s) => Some(unescape_env(s)?), + None => None, + }; + ret.env.push((unescape_env(env_var)?, env_val)); + } else if let Some(pos) = line.find(": ") { + if found_deps { + continue; + } + found_deps = true; let mut deps = line[pos + 2..].split_whitespace(); - let mut ret = Vec::new(); while let Some(s) = deps.next() { let mut file = s.to_string(); while file.ends_with('\\') { @@ -1673,9 +2004,31 @@ internal("malformed dep-info format, trailing \\".to_string()) })?); } - ret.push(file); + ret.files.push(file.into()); } - Ok((target.to_string(), ret)) - }) - .collect() + } + } + return Ok(ret); + + // rustc tries to fit env var names and values all on a single line, which + // means it needs to escape `\r` and `\n`. The escape syntax used is "\n" + // which means that `\` also needs to be escaped. + fn unescape_env(s: &str) -> CargoResult { + let mut ret = String::with_capacity(s.len()); + let mut chars = s.chars(); + while let Some(c) = chars.next() { + if c != '\\' { + ret.push(c); + continue; + } + match chars.next() { + Some('\\') => ret.push('\\'), + Some('n') => ret.push('\n'), + Some('r') => ret.push('\r'), + Some(c) => bail!("unknown escape character `{}`", c), + None => bail!("unterminated escape character"), + } + } + Ok(ret) + } } diff -Nru cargo-0.44.1/src/cargo/core/compiler/job_queue.rs cargo-0.47.0/src/cargo/core/compiler/job_queue.rs --- cargo-0.44.1/src/cargo/core/compiler/job_queue.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/job_queue.rs 2020-07-17 20:39:39.000000000 +0000 @@ -58,7 +58,6 @@ use std::time::Duration; use anyhow::format_err; -use crossbeam_channel::{unbounded, Receiver, Sender}; use crossbeam_utils::thread::Scope; use jobserver::{Acquired, Client, HelperThread}; use log::{debug, info, trace}; @@ -70,21 +69,21 @@ }; use super::timings::Timings; use super::{BuildContext, BuildPlan, CompileMode, Context, Unit}; -use crate::core::{PackageId, TargetKind}; -use crate::util; +use crate::core::{PackageId, Shell, TargetKind}; use crate::util::diagnostic_server::{self, DiagnosticPrinter}; -use crate::util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder}; -use crate::util::{Config, DependencyQueue}; -use crate::util::{Progress, ProgressStyle}; +use crate::util::machine_message::{self, Message as _}; +use crate::util::{self, internal, profile}; +use crate::util::{CargoResult, CargoResultExt, ProcessBuilder}; +use crate::util::{Config, DependencyQueue, Progress, ProgressStyle, Queue}; /// This structure is backed by the `DependencyQueue` type and manages the /// queueing of compilation steps for each package. Packages enqueue units of /// work and then later on the entire graph is converted to DrainState and /// executed. 
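`DependencyQueue` itself is defined elsewhere in Cargo; as a rough standalone illustration of the idea the doc comment above describes (only hand a unit to a worker once everything it depends on has finished), here is a toy version with string-named units. It is a sketch of the concept, not Cargo's implementation:

    use std::collections::{HashMap, HashSet};

    /// Toy dependency queue: a unit becomes ready once its pending-deps set is empty.
    struct DepQueue {
        pending: HashMap<&'static str, HashSet<&'static str>>, // unit -> unfinished deps
        reverse: HashMap<&'static str, Vec<&'static str>>,     // dep -> its dependents
    }

    impl DepQueue {
        fn new(deps: Vec<(&'static str, Vec<&'static str>)>) -> DepQueue {
            let mut pending: HashMap<&'static str, HashSet<&'static str>> = HashMap::new();
            let mut reverse: HashMap<&'static str, Vec<&'static str>> = HashMap::new();
            for (unit, unit_deps) in deps {
                for dep in &unit_deps {
                    reverse.entry(*dep).or_default().push(unit);
                }
                pending.insert(unit, unit_deps.into_iter().collect());
            }
            DepQueue { pending, reverse }
        }

        /// Take one unit whose dependencies have all finished, if any.
        fn dequeue(&mut self) -> Option<&'static str> {
            let ready = *self.pending.iter().find(|(_, deps)| deps.is_empty())?.0;
            self.pending.remove(ready);
            Some(ready)
        }

        /// Record that `unit` finished, unblocking its dependents.
        fn finish(&mut self, unit: &'static str) {
            for dependent in self.reverse.get(unit).into_iter().flatten() {
                if let Some(deps) = self.pending.get_mut(dependent) {
                    deps.remove(unit);
                }
            }
        }
    }

    fn main() {
        // bin depends on lib, lib depends on running its build script.
        let mut queue = DepQueue::new(vec![
            ("run-build-script", vec![]),
            ("lib", vec!["run-build-script"]),
            ("bin", vec!["lib"]),
        ]);
        let mut order = Vec::new();
        while let Some(unit) = queue.dequeue() {
            // A real executor would run these on worker threads.
            order.push(unit);
            queue.finish(unit);
        }
        assert_eq!(order, vec!["run-build-script", "lib", "bin"]);
    }
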
-pub struct JobQueue<'a, 'cfg> { - queue: DependencyQueue, Artifact, Job>, +pub struct JobQueue<'cfg> { + queue: DependencyQueue, counts: HashMap, - timings: Timings<'a, 'cfg>, + timings: Timings<'cfg>, } /// This structure is backed by the `DependencyQueue` type and manages the @@ -93,20 +92,41 @@ /// /// It is created from JobQueue when we have fully assembled the crate graph /// (i.e., all package dependencies are known). -struct DrainState<'a, 'cfg> { +/// +/// # Message queue +/// +/// Each thread running a process uses the message queue to send messages back +/// to the main thread. The main thread coordinates everything, and handles +/// printing output. +/// +/// It is important to be careful which messages use `push` vs `push_bounded`. +/// `push` is for priority messages (like tokens, or "finished") where the +/// sender shouldn't block. We want to handle those so real work can proceed +/// ASAP. +/// +/// `push_bounded` is only for messages being printed to stdout/stderr. Being +/// bounded prevents a flood of messages causing a large amount of memory +/// being used. +/// +/// `push` also avoids blocking which helps avoid deadlocks. For example, when +/// the diagnostic server thread is dropped, it waits for the thread to exit. +/// But if the thread is blocked on a full queue, and there is a critical +/// error, the drop will deadlock. This should be fixed at some point in the +/// future. The jobserver thread has a similar problem, though it will time +/// out after 1 second. +struct DrainState<'cfg> { // This is the length of the DependencyQueue when starting out total_units: usize, - queue: DependencyQueue, Artifact, Job>, - tx: Sender, - rx: Receiver, - active: HashMap>, + queue: DependencyQueue, + messages: Arc>, + active: HashMap, compiled: HashSet, documented: HashSet, counts: HashMap, progress: Progress<'cfg>, next_id: u32, - timings: Timings<'a, 'cfg>, + timings: Timings<'cfg>, /// Tokens that are currently owned by this Cargo, and may be "associated" /// with a rustc process. They may also be unused, though if so will be @@ -127,7 +147,7 @@ /// The list of jobs that we have not yet started executing, but have /// retrieved from the `queue`. We eagerly pull jobs off the main queue to /// allow us to request jobserver tokens pretty early. - pending_queue: Vec<(Unit<'a>, Job)>, + pending_queue: Vec<(Unit, Job)>, print: DiagnosticPrinter<'cfg>, // How many jobs we've finished @@ -145,7 +165,7 @@ pub struct JobState<'a> { /// Channel back to the main thread to coordinate messages and such. - tx: Sender, + messages: Arc>, /// The job id that this state is associated with, used when sending /// messages back to the main thread. @@ -199,7 +219,7 @@ impl<'a> JobState<'a> { pub fn running(&self, cmd: &ProcessBuilder) { - let _ = self.tx.send(Message::Run(self.id, cmd.to_string())); + self.messages.push(Message::Run(self.id, cmd.to_string())); } pub fn build_plan( @@ -208,17 +228,16 @@ cmd: ProcessBuilder, filenames: Arc>, ) { - let _ = self - .tx - .send(Message::BuildPlanMsg(module_name, cmd, filenames)); + self.messages + .push(Message::BuildPlanMsg(module_name, cmd, filenames)); } pub fn stdout(&self, stdout: String) { - drop(self.tx.send(Message::Stdout(stdout))); + self.messages.push_bounded(Message::Stdout(stdout)); } pub fn stderr(&self, stderr: String) { - drop(self.tx.send(Message::Stderr(stderr))); + self.messages.push_bounded(Message::Stderr(stderr)); } /// A method used to signal to the coordinator thread that the rmeta file @@ -228,9 +247,8 @@ /// produced once! 
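The push/push_bounded distinction described in the message-queue notes above (never block priority messages, apply backpressure to stdout/stderr) can be pictured with std channels: an unbounded `channel` never blocks the sender, while a `sync_channel` with fixed capacity does. Cargo's `Queue` type combines both behaviours behind `push` and `push_bounded`; the std `mpsc` channels below are only stand-ins showing the two building blocks:

    use std::sync::mpsc::{channel, sync_channel};
    use std::thread;
    use std::time::Duration;

    fn main() {
        // Unbounded: like `push` for priority messages (tokens, Finish);
        // the sending side never blocks.
        let (prio_tx, prio_rx) = channel();
        prio_tx.send("token acquired").unwrap();

        // Bounded: like `push_bounded` for stdout/stderr. Once the queue is
        // full, the worker thread blocks until the main thread catches up,
        // which caps memory usage for very chatty compilations.
        let (out_tx, out_rx) = sync_channel(2);
        let worker = thread::spawn(move || {
            for i in 0..4 {
                // With capacity 2 and an idle receiver, the 3rd send blocks
                // until the main thread drains something.
                out_tx.send(format!("line {}", i)).unwrap();
            }
        });

        thread::sleep(Duration::from_millis(50));
        for line in out_rx.iter() {
            println!("{}", line);
        }
        println!("{}", prio_rx.recv().unwrap());
        worker.join().unwrap();
    }
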
pub fn rmeta_produced(&self) { self.rmeta_required.set(false); - let _ = self - .tx - .send(Message::Finish(self.id, Artifact::Metadata, Ok(()))); + self.messages + .push(Message::Finish(self.id, Artifact::Metadata, Ok(()))); } /// The rustc underlying this Job is about to acquire a jobserver token (i.e., block) @@ -239,32 +257,27 @@ /// This should arrange for the associated client to eventually get a token via /// `client.release_raw()`. pub fn will_acquire(&self) { - let _ = self.tx.send(Message::NeedsToken(self.id)); + self.messages.push(Message::NeedsToken(self.id)); } /// The rustc underlying this Job is informing us that it is done with a jobserver token. /// /// Note that it does *not* write that token back anywhere. pub fn release_token(&self) { - let _ = self.tx.send(Message::ReleaseToken(self.id)); + self.messages.push(Message::ReleaseToken(self.id)); } } -impl<'a, 'cfg> JobQueue<'a, 'cfg> { - pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'a>]) -> JobQueue<'a, 'cfg> { +impl<'cfg> JobQueue<'cfg> { + pub fn new(bcx: &BuildContext<'_, 'cfg>) -> JobQueue<'cfg> { JobQueue { queue: DependencyQueue::new(), counts: HashMap::new(), - timings: Timings::new(bcx, root_units), + timings: Timings::new(bcx, &bcx.roots), } } - pub fn enqueue( - &mut self, - cx: &Context<'a, 'cfg>, - unit: &Unit<'a>, - job: Job, - ) -> CargoResult<()> { + pub fn enqueue(&mut self, cx: &Context<'_, 'cfg>, unit: &Unit, job: Job) -> CargoResult<()> { let dependencies = cx.unit_deps(unit); let mut queue_deps = dependencies .iter() @@ -283,7 +296,7 @@ } else { Artifact::All }; - (dep.unit, artifact) + (dep.unit.clone(), artifact) }) .collect::>(); @@ -310,23 +323,23 @@ // transitively contains the `Metadata` edge. if unit.requires_upstream_objects() { for dep in dependencies { - depend_on_deps_of_deps(cx, &mut queue_deps, dep.unit); + depend_on_deps_of_deps(cx, &mut queue_deps, dep.unit.clone()); } - fn depend_on_deps_of_deps<'a>( - cx: &Context<'a, '_>, - deps: &mut HashMap, Artifact>, - unit: Unit<'a>, + fn depend_on_deps_of_deps( + cx: &Context<'_, '_>, + deps: &mut HashMap, + unit: Unit, ) { for dep in cx.unit_deps(&unit) { - if deps.insert(dep.unit, Artifact::All).is_none() { - depend_on_deps_of_deps(cx, deps, dep.unit); + if deps.insert(dep.unit.clone(), Artifact::All).is_none() { + depend_on_deps_of_deps(cx, deps, dep.unit.clone()); } } } } - self.queue.queue(*unit, job, queue_deps); + self.queue.queue(unit.clone(), job, queue_deps); *self.counts.entry(unit.pkg.package_id()).or_insert(0) += 1; Ok(()) } @@ -336,17 +349,19 @@ /// This function will spawn off `config.jobs()` workers to build all of the /// necessary dependencies, in order. Freshness is propagated as far as /// possible along each dependency chain. - pub fn execute(mut self, cx: &mut Context<'a, '_>, plan: &mut BuildPlan) -> CargoResult<()> { + pub fn execute(mut self, cx: &mut Context<'_, '_>, plan: &mut BuildPlan) -> CargoResult<()> { let _p = profile::start("executing the job graph"); self.queue.queue_finished(); - let (tx, rx) = unbounded(); let progress = Progress::with_style("Building", ProgressStyle::Ratio, cx.bcx.config); let state = DrainState { total_units: self.queue.len(), queue: self.queue, - tx, - rx, + // 100 here is somewhat arbitrary. It is a few screenfulls of + // output, and hopefully at most a few megabytes of memory for + // typical messages. If you change this, please update the test + // caching_large_output, too. 
+ messages: Arc::new(Queue::new(100)), active: HashMap::new(), compiled: HashSet::new(), documented: HashSet::new(), @@ -354,7 +369,6 @@ progress, next_id: 0, timings: self.timings, - tokens: Vec::new(), rustc_tokens: HashMap::new(), to_send_clients: BTreeMap::new(), @@ -364,38 +378,45 @@ }; // Create a helper thread for acquiring jobserver tokens - let tx = state.tx.clone(); + let messages = state.messages.clone(); let helper = cx .jobserver .clone() .into_helper_thread(move |token| { - drop(tx.send(Message::Token(token))); + messages.push(Message::Token(token)); }) .chain_err(|| "failed to create helper thread for jobserver management")?; // Create a helper thread to manage the diagnostics for rustfix if // necessary. - let tx = state.tx.clone(); + let messages = state.messages.clone(); + // It is important that this uses `push` instead of `push_bounded` for + // now. If someone wants to fix this to be bounded, the `drop` + // implementation needs to be changed to avoid possible deadlocks. let _diagnostic_server = cx .bcx .build_config .rustfix_diagnostic_server .borrow_mut() .take() - .map(move |srv| srv.start(move |msg| drop(tx.send(Message::FixDiagnostic(msg))))); + .map(move |srv| srv.start(move |msg| messages.push(Message::FixDiagnostic(msg)))); - crossbeam_utils::thread::scope(move |scope| state.drain_the_queue(cx, plan, scope, &helper)) - .expect("child threads shouldn't panic") + crossbeam_utils::thread::scope(move |scope| { + match state.drain_the_queue(cx, plan, scope, &helper) { + Some(err) => Err(err), + None => Ok(()), + } + }) + .expect("child threads shouldn't panic") } } -impl<'a, 'cfg> DrainState<'a, 'cfg> { +impl<'cfg> DrainState<'cfg> { fn spawn_work_if_possible( &mut self, - cx: &mut Context<'a, '_>, + cx: &mut Context<'_, '_>, jobserver_helper: &HelperThread, scope: &Scope<'_>, - has_errored: bool, ) -> CargoResult<()> { // Dequeue as much work as we can, learning about everything // possible that can run. Note that this is also the point where we @@ -408,11 +429,6 @@ } } - // Do not actually spawn the new work if we've errored out - if has_errored { - return Ok(()); - } - // Now that we've learned of all possible work that we can execute // try to spawn it so long as we've got a jobserver token which says // we're able to perform some parallel work. @@ -466,24 +482,24 @@ fn handle_event( &mut self, - cx: &mut Context<'a, '_>, + cx: &mut Context<'_, '_>, jobserver_helper: &HelperThread, plan: &mut BuildPlan, event: Message, - ) -> CargoResult> { + ) -> CargoResult<()> { match event { Message::Run(id, cmd) => { cx.bcx .config .shell() .verbose(|c| c.status("Running", &cmd))?; - self.timings.unit_start(id, self.active[&id]); + self.timings.unit_start(id, self.active[&id].clone()); } Message::BuildPlanMsg(module_name, cmd, filenames) => { plan.update(&module_name, &cmd, &filenames)?; } Message::Stdout(out) => { - cx.bcx.config.shell().stdout_println(out); + writeln!(cx.bcx.config.shell().out(), "{}", out)?; } Message::Stderr(err) => { let mut shell = cx.bcx.config.shell(); @@ -519,7 +535,7 @@ // in there as we'll get another `Finish` later on. 
Artifact::Metadata => { info!("end (meta): {:?}", id); - self.active[&id] + self.active[&id].clone() } }; info!("end ({:?}): {:?}", unit, result); @@ -528,17 +544,7 @@ Err(e) => { let msg = "The following warnings were emitted during compilation:"; self.emit_warnings(Some(msg), &unit, cx)?; - - if !self.active.is_empty() { - crate::display_error(&e, &mut *cx.bcx.config.shell()); - cx.bcx.config.shell().warn( - "build failed, waiting for other \ - jobs to finish...", - )?; - return Ok(Some(anyhow::format_err!("build failed"))); - } else { - return Ok(Some(e)); - } + return Err(e); } } } @@ -573,7 +579,7 @@ } } - Ok(None) + Ok(()) } // This will also tick the progress bar as appropriate @@ -584,7 +590,7 @@ // to run above to calculate CPU usage over time. To do this we // listen for a message with a timeout, and on timeout we run the // previous parts of the loop again. - let events: Vec<_> = self.rx.try_iter().collect(); + let mut events = self.messages.try_pop_all(); info!( "tokens in use: {}, rustc_tokens: {:?}, waiting_rustcs: {:?} (events this tick: {})", self.tokens.len(), @@ -602,23 +608,30 @@ loop { self.tick_progress(); self.tokens.truncate(self.active.len() - 1); - match self.rx.recv_timeout(Duration::from_millis(500)) { - Ok(message) => break vec![message], - Err(_) => continue, + match self.messages.pop(Duration::from_millis(500)) { + Some(message) => { + events.push(message); + break; + } + None => continue, } } - } else { - events } + events } + /// This is the "main" loop, where Cargo does all work to run the + /// compiler. + /// + /// This returns an Option to prevent the use of `?` on `Result` types + /// because it is important for the loop to carefully handle errors. fn drain_the_queue( mut self, - cx: &mut Context<'a, '_>, + cx: &mut Context<'_, '_>, plan: &mut BuildPlan, - scope: &Scope<'a>, + scope: &Scope<'_>, jobserver_helper: &HelperThread, - ) -> CargoResult<()> { + ) -> Option { trace!("queue: {:#?}", self.queue); // Iteratively execute the entire dependency graph. Each turn of the @@ -632,8 +645,15 @@ // successful and otherwise wait for pending work to finish if it failed // and then immediately return. let mut error = None; + // CAUTION! Do not use `?` or break out of the loop early. Every error + // must be handled in such a way that the loop is still allowed to + // drain event messages. loop { - self.spawn_work_if_possible(cx, jobserver_helper, scope, error.is_some())?; + if error.is_none() { + if let Err(e) = self.spawn_work_if_possible(cx, jobserver_helper, scope) { + self.handle_error(&mut cx.bcx.config.shell(), &mut error, e); + } + } // If after all that we're not actually running anything then we're // done! @@ -641,7 +661,9 @@ break; } - self.grant_rustc_token_requests()?; + if let Err(e) = self.grant_rustc_token_requests() { + self.handle_error(&mut cx.bcx.config.shell(), &mut error, e); + } // And finally, before we block waiting for the next event, drop any // excess tokens we may have accidentally acquired. Due to how our @@ -649,8 +671,8 @@ // don't actually use, and if this happens just relinquish it back // to the jobserver itself. for event in self.wait_for_events() { - if let Some(err) = self.handle_event(cx, jobserver_helper, plan, event)? 
{ - error = Some(err); + if let Err(event_err) = self.handle_event(cx, jobserver_helper, plan, event) { + self.handle_error(&mut cx.bcx.config.shell(), &mut error, event_err); } } } @@ -675,22 +697,62 @@ } let time_elapsed = util::elapsed(cx.bcx.config.creation_time().elapsed()); - self.timings.finished(cx.bcx, &error)?; + if let Err(e) = self.timings.finished(cx.bcx, &error) { + if error.is_some() { + crate::display_error(&e, &mut cx.bcx.config.shell()); + } else { + return Some(e); + } + } + if cx.bcx.build_config.emit_json() { + let msg = machine_message::BuildFinished { + success: error.is_none(), + } + .to_json_string(); + if let Err(e) = writeln!(cx.bcx.config.shell().out(), "{}", msg) { + if error.is_some() { + crate::display_error(&e.into(), &mut cx.bcx.config.shell()); + } else { + return Some(e.into()); + } + } + } if let Some(e) = error { - Err(e) + Some(e) } else if self.queue.is_empty() && self.pending_queue.is_empty() { let message = format!( "{} [{}] target(s) in {}", profile_name, opt_type, time_elapsed ); if !cx.bcx.build_config.build_plan { - cx.bcx.config.shell().status("Finished", message)?; + // It doesn't really matter if this fails. + drop(cx.bcx.config.shell().status("Finished", message)); } - Ok(()) + None } else { debug!("queue: {:#?}", self.queue); - Err(internal("finished with jobs still left in the queue")) + Some(internal("finished with jobs still left in the queue")) + } + } + + fn handle_error( + &self, + shell: &mut Shell, + err_state: &mut Option, + new_err: anyhow::Error, + ) { + if err_state.is_some() { + // Already encountered one error. + log::warn!("{:?}", new_err); + } else { + if !self.active.is_empty() { + crate::display_error(&new_err, shell); + drop(shell.warn("build failed, waiting for other jobs to finish...")); + *err_state = Some(anyhow::format_err!("build failed")); + } else { + *err_state = Some(new_err); + } } } @@ -721,7 +783,7 @@ )); } - fn name_for_progress(&self, unit: &Unit<'_>) -> String { + fn name_for_progress(&self, unit: &Unit) -> String { let pkg_name = unit.pkg.name(); match unit.mode { CompileMode::Doc { .. } => format!("{}(doc)", pkg_name), @@ -743,9 +805,9 @@ /// Executes a job, pushing the spawned thread's handled onto `threads`. fn run( &mut self, - unit: &Unit<'a>, + unit: &Unit, job: Job, - cx: &Context<'a, '_>, + cx: &Context<'_, '_>, scope: &Scope<'_>, ) -> CargoResult<()> { let id = JobId(self.next_id); @@ -753,10 +815,10 @@ info!("start {}: {:?}", id, unit); - assert!(self.active.insert(id, *unit).is_none()); + assert!(self.active.insert(id, unit.clone()).is_none()); *self.counts.get_mut(&unit.pkg.package_id()).unwrap() -= 1; - let my_tx = self.tx.clone(); + let messages = self.messages.clone(); let fresh = job.freshness(); let rmeta_required = cx.rmeta_required(unit); @@ -768,13 +830,13 @@ let doit = move || { let state = JobState { id, - tx: my_tx.clone(), + messages: messages.clone(), rmeta_required: Cell::new(rmeta_required), _marker: marker::PhantomData, }; let mut sender = FinishOnDrop { - tx: &my_tx, + messages: &messages, id, result: Err(format_err!("worker panicked")), }; @@ -793,9 +855,7 @@ // we need to make sure that the metadata is flagged as produced so // send a synthetic message here. 
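The `wait_for_events` loop earlier in this hunk deliberately polls with a 500 ms timeout so the progress bar and CPU sampling keep ticking even while rustc is quiet. A minimal sketch of that polling pattern using `std::sync::mpsc`; Cargo's own `Queue::pop` plays the role of `recv_timeout` here.

```rust
use std::sync::mpsc::{channel, RecvTimeoutError};
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = channel();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(1200));
        tx.send("Finish").unwrap();
    });

    loop {
        // Equivalent of `tick_progress()`: runs even when nothing arrived yet.
        println!("tick: refresh progress bar / sample CPU usage");
        match rx.recv_timeout(Duration::from_millis(500)) {
            Ok(msg) => {
                println!("got message: {}", msg);
                break; // hand the batch of events to `handle_event`
            }
            Err(RecvTimeoutError::Timeout) => continue,
            Err(RecvTimeoutError::Disconnected) => break,
        }
    }
}
```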
if state.rmeta_required.get() && sender.result.is_ok() { - my_tx - .send(Message::Finish(id, Artifact::Metadata, Ok(()))) - .unwrap(); + messages.push(Message::Finish(id, Artifact::Metadata, Ok(()))); } // Use a helper struct with a `Drop` implementation to guarantee @@ -803,7 +863,7 @@ // shouldn't panic unless there's a bug in Cargo, so we just need // to make sure nothing hangs by accident. struct FinishOnDrop<'a> { - tx: &'a Sender, + messages: &'a Queue, id: JobId, result: CargoResult<()>, } @@ -811,21 +871,17 @@ impl Drop for FinishOnDrop<'_> { fn drop(&mut self) { let msg = mem::replace(&mut self.result, Ok(())); - drop(self.tx.send(Message::Finish(self.id, Artifact::All, msg))); + self.messages + .push(Message::Finish(self.id, Artifact::All, msg)); } } }; match fresh { - Freshness::Fresh => { - self.timings.add_fresh(); - doit(); - } - Freshness::Dirty => { - self.timings.add_dirty(); - scope.spawn(move |_| doit()); - } + Freshness::Fresh => self.timings.add_fresh(), + Freshness::Dirty => self.timings.add_dirty(), } + scope.spawn(move |_| doit()); Ok(()) } @@ -833,11 +889,11 @@ fn emit_warnings( &mut self, msg: Option<&str>, - unit: &Unit<'a>, - cx: &mut Context<'a, '_>, + unit: &Unit, + cx: &mut Context<'_, '_>, ) -> CargoResult<()> { let outputs = cx.build_script_outputs.lock().unwrap(); - let metadata = match cx.find_build_script_metadata(*unit) { + let metadata = match cx.find_build_script_metadata(unit.clone()) { Some(metadata) => metadata, None => return Ok(()), }; @@ -865,11 +921,11 @@ fn finish( &mut self, id: JobId, - unit: &Unit<'a>, + unit: &Unit, artifact: Artifact, - cx: &mut Context<'a, '_>, + cx: &mut Context<'_, '_>, ) -> CargoResult<()> { - if unit.mode.is_run_custom_build() && cx.bcx.show_warnings(unit.pkg.package_id()) { + if unit.mode.is_run_custom_build() && unit.show_warnings(cx.bcx.config) { self.emit_warnings(None, unit, cx)?; } let unlocked = self.queue.finish(unit, &artifact); @@ -892,7 +948,7 @@ fn note_working_on( &mut self, config: &Config, - unit: &Unit<'a>, + unit: &Unit, fresh: Freshness, ) -> CargoResult<()> { if (self.compiled.contains(&unit.pkg.package_id()) && !unit.mode.is_doc()) @@ -907,15 +963,15 @@ Dirty => { if unit.mode.is_doc() { self.documented.insert(unit.pkg.package_id()); - config.shell().status("Documenting", unit.pkg)?; + config.shell().status("Documenting", &unit.pkg)?; } else if unit.mode.is_doc_test() { // Skip doc test. } else { self.compiled.insert(unit.pkg.package_id()); if unit.mode.is_check() { - config.shell().status("Checking", unit.pkg)?; + config.shell().status("Checking", &unit.pkg)?; } else { - config.shell().status("Compiling", unit.pkg)?; + config.shell().status("Compiling", &unit.pkg)?; } } } @@ -925,7 +981,7 @@ && !(unit.mode.is_doc_test() && self.compiled.contains(&unit.pkg.package_id())) { self.compiled.insert(unit.pkg.package_id()); - config.shell().verbose(|c| c.status("Fresh", unit.pkg))?; + config.shell().verbose(|c| c.status("Fresh", &unit.pkg))?; } } } diff -Nru cargo-0.44.1/src/cargo/core/compiler/layout.rs cargo-0.47.0/src/cargo/core/compiler/layout.rs --- cargo-0.44.1/src/cargo/core/compiler/layout.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/layout.rs 2020-07-17 20:39:39.000000000 +0000 @@ -26,19 +26,20 @@ //! # packages //! .fingerprint/ //! # Each package is in a separate directory. +//! # Note that different target kinds have different filename prefixes. //! $pkgname-$META/ //! # Set of source filenames for this package. -//! dep-lib-$pkgname-$META +//! 
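`FinishOnDrop` above is a drop guard: whatever happens inside the worker, including a panic, the coordinator is guaranteed to receive a finish message describing how the job ended. A standalone sketch of the same pattern with illustrative names and types, not Cargo's.

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;

struct FinishGuard {
    tx: Sender<Result<(), String>>,
    result: Result<(), String>,
}

impl Drop for FinishGuard {
    fn drop(&mut self) {
        // Runs on normal exit *and* during unwinding, so the coordinator
        // always learns how the job ended.
        let msg = std::mem::replace(&mut self.result, Ok(()));
        let _ = self.tx.send(msg);
    }
}

fn main() {
    let (tx, rx) = channel();
    let handle = thread::spawn(move || {
        let mut guard = FinishGuard {
            tx,
            result: Err("worker panicked".to_string()),
        };
        // ... do the real work here ...
        guard.result = Ok(()); // only reached if the work did not panic
    });
    let _ = handle.join();
    println!("job finished with: {:?}", rx.recv().unwrap());
}
```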
dep-lib-$targetname //! # Timestamp when this package was last built. //! invoked.timestamp //! # The fingerprint hash. -//! lib-$pkgname-$META +//! lib-$targetname //! # Detailed information used for logging the reason why //! # something is being recompiled. -//! lib-$pkgname-$META.json +//! lib-$targetname.json //! # The console output from the compiler. This is cached //! # so that warnings can be redisplayed for "fresh" units. -//! output +//! output-lib-$targetname //! //! # This is the root directory for all rustc artifacts except build //! # scripts, examples, and test and bench executables. Almost every @@ -129,12 +130,6 @@ _lock: FileLock, } -pub fn is_bad_artifact_name(name: &str) -> bool { - ["deps", "examples", "build", "incremental"] - .iter() - .any(|&reserved| reserved == name) -} - impl Layout { /// Calculate the paths for build output, lock the build directory, and return as a Layout. /// @@ -155,10 +150,11 @@ // If the root directory doesn't already exist go ahead and create it // here. Use this opportunity to exclude it from backups as well if the // system supports it since this is a freshly created folder. - if !dest.as_path_unlocked().exists() { - dest.create_dir()?; - exclude_from_backups(dest.as_path_unlocked()); - } + // + paths::create_dir_all_excluded_from_backups_atomic(root.as_path_unlocked())?; + // Now that the excluded from backups target root is created we can create the + // actual destination (sub)subdirectory. + paths::create_dir_all(dest.as_path_unlocked())?; // For now we don't do any more finer-grained locking on the artifact // directory, so just lock the entire thing for the duration of this @@ -224,32 +220,3 @@ &self.build } } - -#[cfg(not(target_os = "macos"))] -fn exclude_from_backups(_: &Path) {} - -#[cfg(target_os = "macos")] -/// Marks files or directories as excluded from Time Machine on macOS -/// -/// This is recommended to prevent derived/temporary files from bloating backups. -fn exclude_from_backups(path: &Path) { - use core_foundation::base::TCFType; - use core_foundation::{number, string, url}; - use std::ptr; - - // For compatibility with 10.7 a string is used instead of global kCFURLIsExcludedFromBackupKey - let is_excluded_key: Result = "NSURLIsExcludedFromBackupKey".parse(); - let path = url::CFURL::from_path(path, false); - if let (Some(path), Ok(is_excluded_key)) = (path, is_excluded_key) { - unsafe { - url::CFURLSetResourcePropertyForKey( - path.as_concrete_TypeRef(), - is_excluded_key.as_concrete_TypeRef(), - number::kCFBooleanTrue as *const _, - ptr::null_mut(), - ); - } - } - // Errors are ignored, since it's an optional feature and failure - // doesn't prevent Cargo from working -} diff -Nru cargo-0.44.1/src/cargo/core/compiler/links.rs cargo-0.47.0/src/cargo/core/compiler/links.rs --- cargo-0.44.1/src/cargo/core/compiler/links.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/links.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,11 +1,11 @@ -use super::unit_dependencies::UnitGraph; +use super::unit_graph::UnitGraph; use crate::core::{PackageId, Resolve}; use crate::util::errors::CargoResult; use std::collections::{HashMap, HashSet}; use std::fmt::Write; /// Validate `links` field does not conflict between packages. -pub fn validate_links(resolve: &Resolve, unit_graph: &UnitGraph<'_>) -> CargoResult<()> { +pub fn validate_links(resolve: &Resolve, unit_graph: &UnitGraph) -> CargoResult<()> { // NOTE: This is the *old* links validator. Links are usually validated in // the resolver. 
However, the `links` field was added to the index in // early 2018 (see https://github.com/rust-lang/cargo/pull/4978). However, diff -Nru cargo-0.44.1/src/cargo/core/compiler/lto.rs cargo-0.47.0/src/cargo/core/compiler/lto.rs --- cargo-0.44.1/src/cargo/core/compiler/lto.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/lto.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,192 @@ +use crate::core::compiler::{CompileMode, Context, CrateType, Unit}; +use crate::core::profiles; +use crate::util::interning::InternedString; + +use crate::util::errors::CargoResult; +use std::collections::hash_map::{Entry, HashMap}; + +/// Possible ways to run rustc and request various parts of LTO. +/// +/// Variant | Flag | Object Code | Bitcode +/// -------------------|------------------------|-------------|-------- +/// `Run` | `-C lto=foo` | n/a | n/a +/// `Off` | `-C lto=off` | n/a | n/a +/// `OnlyBitcode` | `-C linker-plugin-lto` | | ✓ +/// `ObjectAndBitcode` | | ✓ | ✓ +/// `OnlyObject` | `-C embed-bitcode=no` | ✓ | +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum Lto { + /// LTO is run for this rustc, and it's `-Clto=foo`. If the given value is + /// None, that corresponds to `-Clto` with no argument, which means do + /// "fat" LTO. + Run(Option), + + /// LTO has been explicitly listed as "off". This means no thin-local-LTO, + /// no LTO anywhere, I really mean it! + Off, + + /// This rustc invocation only needs to produce bitcode (it is *only* used + /// for LTO), there's no need to produce object files, so we can pass + /// `-Clinker-plugin-lto` + OnlyBitcode, + + /// This rustc invocation needs to embed bitcode in object files. This means + /// that object files may be used for a normal link, and the crate may be + /// loaded for LTO later, so both are required. + ObjectAndBitcode, + + /// This should not include bitcode. This is primarily to reduce disk + /// space usage. + OnlyObject, +} + +pub fn generate(cx: &mut Context<'_, '_>) -> CargoResult<()> { + let mut map = HashMap::new(); + for unit in cx.bcx.roots.iter() { + let root_lto = match unit.profile.lto { + // LTO not requested, no need for bitcode. + profiles::Lto::Bool(false) | profiles::Lto::Off => Lto::OnlyObject, + _ => { + let crate_types = unit.target.rustc_crate_types(); + if unit.target.for_host() { + Lto::OnlyObject + } else if needs_object(&crate_types) { + lto_when_needs_object(&crate_types) + } else { + // This may or may not participate in LTO, let's start + // with the minimum requirements. This may be expanded in + // `calculate` below if necessary. + Lto::OnlyBitcode + } + } + }; + calculate(cx, &mut map, unit, root_lto)?; + } + cx.lto = map; + Ok(()) +} + +/// Whether or not any of these crate types need object code. +fn needs_object(crate_types: &[CrateType]) -> bool { + crate_types.iter().any(|k| k.can_lto() || k.is_dynamic()) +} + +/// Lto setting to use when this unit needs object code. +fn lto_when_needs_object(crate_types: &[CrateType]) -> Lto { + if crate_types.iter().any(CrateType::can_lto) { + // A mixed rlib/cdylib whose parent is running LTO. This + // needs both, for bitcode in the rlib (for LTO) and the + // cdylib requires object code. + Lto::ObjectAndBitcode + } else { + // A dylib whose parent is running LTO. rustc currently + // doesn't support LTO with dylibs, so bitcode is not + // needed. 
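The table above is the contract between `lto.rs` and the flag-emitting match in `build_base_args` later in this diff. A compact sketch of that mapping, with the `supports_embed_bitcode` probe simplified away (older compilers that predate `-Cembed-bitcode` simply skip the last two flags).

```rust
#[derive(Debug, Clone, Copy)]
enum Lto<'a> {
    Run(Option<&'a str>),
    Off,
    OnlyBitcode,
    ObjectAndBitcode,
    OnlyObject,
}

/// Flags added to the rustc invocation for a given LTO decision.
fn lto_flags(lto: Lto<'_>) -> Vec<String> {
    match lto {
        Lto::Run(None) => vec!["-C".into(), "lto".into()],
        Lto::Run(Some(kind)) => vec!["-C".into(), format!("lto={}", kind)], // e.g. "thin"
        Lto::Off => vec!["-C".into(), "lto=off".into()],
        // rustc's default output already contains both object code and bitcode.
        Lto::ObjectAndBitcode => vec![],
        // Only bitcode is needed; skip producing object code.
        Lto::OnlyBitcode => vec!["-Clinker-plugin-lto".into()],
        // Only object code is needed; skip embedding bitcode.
        Lto::OnlyObject => vec!["-Cembed-bitcode=no".into()],
    }
}

fn main() {
    assert_eq!(lto_flags(Lto::Run(Some("thin"))), vec!["-C", "lto=thin"]);
    assert_eq!(lto_flags(Lto::Off), vec!["-C", "lto=off"]);
    assert_eq!(lto_flags(Lto::OnlyBitcode), vec!["-Clinker-plugin-lto"]);
    assert_eq!(lto_flags(Lto::OnlyObject), vec!["-Cembed-bitcode=no"]);
    assert!(lto_flags(Lto::ObjectAndBitcode).is_empty());
}
```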
+ Lto::OnlyObject + } +} + +fn calculate( + cx: &Context<'_, '_>, + map: &mut HashMap, + unit: &Unit, + parent_lto: Lto, +) -> CargoResult<()> { + let crate_types = match unit.mode { + // Note: Doctest ignores LTO, but for now we'll compute it as-if it is + // a Bin, in case it is ever supported in the future. + CompileMode::Test | CompileMode::Bench | CompileMode::Doctest => vec![CrateType::Bin], + // Notes on other modes: + // - Check: Treat as the underlying type, it doesn't really matter. + // - Doc: LTO is N/A for the Doc unit itself since rustdoc does not + // support codegen flags. We still compute the dependencies, which + // are mostly `Check`. + // - RunCustomBuild is ignored because it is always "for_host". + _ => unit.target.rustc_crate_types(), + }; + // LTO can only be performed if *all* of the crate types support it. + // For example, a cdylib/rlib combination won't allow LTO. + let all_lto_types = crate_types.iter().all(CrateType::can_lto); + // Compute the LTO based on the profile, and what our parent requires. + let lto = if unit.target.for_host() { + // Disable LTO for host builds since we only really want to perform LTO + // for the final binary, and LTO on plugins/build scripts/proc macros is + // largely not desired. + Lto::OnlyObject + } else if all_lto_types { + // Note that this ignores the `parent_lto` because this isn't a + // linkable crate type; this unit is not being embedded in the parent. + match unit.profile.lto { + profiles::Lto::Named(s) => Lto::Run(Some(s)), + profiles::Lto::Off => Lto::Off, + profiles::Lto::Bool(true) => Lto::Run(None), + profiles::Lto::Bool(false) => Lto::OnlyObject, + } + } else { + match (parent_lto, needs_object(&crate_types)) { + // An rlib whose parent is running LTO, we only need bitcode. + (Lto::Run(_), false) => Lto::OnlyBitcode, + // LTO when something needs object code. + (Lto::Run(_), true) | (Lto::OnlyBitcode, true) => lto_when_needs_object(&crate_types), + // LTO is disabled, no need for bitcode. + (Lto::Off, _) => Lto::OnlyObject, + // If this doesn't have any requirements, or the requirements are + // already satisfied, then stay with our parent. + (_, false) | (Lto::OnlyObject, true) | (Lto::ObjectAndBitcode, true) => parent_lto, + } + }; + + // Merge the computed LTO. If this unit appears multiple times in the + // graph, the merge may expand the requirements. + let merged_lto = match map.entry(unit.clone()) { + // If we haven't seen this unit before then insert our value and keep + // going. + Entry::Vacant(v) => *v.insert(lto), + + Entry::Occupied(mut v) => { + let result = match (lto, v.get()) { + // No change in requirements. + (Lto::OnlyBitcode, Lto::OnlyBitcode) => Lto::OnlyBitcode, + (Lto::OnlyObject, Lto::OnlyObject) => Lto::OnlyObject, + + // Once we're running LTO we keep running LTO. We should always + // calculate the same thing here each iteration because if we + // see this twice then it means, for example, two unit tests + // depend on a binary, which is normal. + (Lto::Run(s), _) | (_, &Lto::Run(s)) => Lto::Run(s), + + // Off means off! This has the same reasoning as `Lto::Run`. + (Lto::Off, _) | (_, Lto::Off) => Lto::Off, + + // Once a target has requested both, that's the maximal amount + // of work that can be done, so we just keep doing that work. + (Lto::ObjectAndBitcode, _) | (_, Lto::ObjectAndBitcode) => Lto::ObjectAndBitcode, + + // Upgrade so that both requirements can be met. + // + // This is where the trickiness happens. 
This unit needs + // bitcode and the previously calculated value for this unit + // says it didn't need bitcode (or vice versa). This means that + // we're a shared dependency between some targets which require + // LTO and some which don't. This means that instead of being + // either only-objects or only-bitcode we have to embed both in + // rlibs (used for different compilations), so we switch to + // including both. + (Lto::OnlyObject, Lto::OnlyBitcode) | (Lto::OnlyBitcode, Lto::OnlyObject) => { + Lto::ObjectAndBitcode + } + }; + // No need to recurse if we calculated the same value as before. + if result == *v.get() { + return Ok(()); + } + v.insert(result); + result + } + }; + + for dep in cx.unit_deps(unit) { + calculate(cx, map, &dep.unit, merged_lto)?; + } + Ok(()) +} diff -Nru cargo-0.44.1/src/cargo/core/compiler/mod.rs cargo-0.47.0/src/cargo/core/compiler/mod.rs --- cargo-0.44.1/src/cargo/core/compiler/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,17 +4,21 @@ mod compilation; mod compile_kind; mod context; +mod crate_type; mod custom_build; mod fingerprint; mod job; mod job_queue; mod layout; mod links; +mod lto; mod output_depinfo; +pub mod rustdoc; pub mod standard_lib; mod timings; mod unit; pub mod unit_dependencies; +pub mod unit_graph; use std::env; use std::ffi::{OsStr, OsString}; @@ -28,27 +32,32 @@ use log::debug; pub use self::build_config::{BuildConfig, CompileMode, MessageFormat}; -pub use self::build_context::{BuildContext, FileFlavor, RustcTargetData, TargetInfo}; +pub use self::build_context::{BuildContext, FileFlavor, FileType, RustcTargetData, TargetInfo}; use self::build_plan::BuildPlan; pub use self::compilation::{Compilation, Doctest}; pub use self::compile_kind::{CompileKind, CompileTarget}; pub use self::context::{Context, Metadata}; +pub use self::crate_type::CrateType; pub use self::custom_build::{BuildOutput, BuildScriptOutputs, BuildScripts}; pub use self::job::Freshness; use self::job::{Job, Work}; use self::job_queue::{JobQueue, JobState}; -pub use self::layout::is_bad_artifact_name; +pub(crate) use self::layout::Layout; +pub use self::lto::Lto; use self::output_depinfo::output_depinfo; -use self::unit_dependencies::UnitDep; +use self::unit_graph::UnitDep; pub use crate::core::compiler::unit::{Unit, UnitInterner}; use crate::core::manifest::TargetSourcePath; -use crate::core::profiles::{Lto, PanicStrategy, Profile}; -use crate::core::{Edition, Feature, InternedString, PackageId, Target}; +use crate::core::profiles::{PanicStrategy, Profile, Strip}; +use crate::core::{Edition, Feature, PackageId, Target}; use crate::util::errors::{self, CargoResult, CargoResultExt, ProcessError, VerboseError}; +use crate::util::interning::InternedString; use crate::util::machine_message::Message; use crate::util::{self, machine_message, ProcessBuilder}; use crate::util::{internal, join_paths, paths, profile}; +const RUSTDOC_CRATE_VERSION_FLAG: &str = "--crate-version"; + /// A glorified callback for executing calls to rustc. Rather than calling rustc /// directly, we'll use an `Executor`, giving clients an opportunity to intercept /// the build calls. @@ -56,13 +65,13 @@ /// Called after a rustc process invocation is prepared up-front for a given /// unit of work (may still be modified for runtime-known dependencies, when /// the work is actually executed). 
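`calculate` above widens the requirement for a dependency reached from several roots: once one dependent needs object code and another needs bitcode, the shared rlib must carry both. A worked sketch of just that merge rule, with the named-LTO payload and the re-recursion on a changed answer left out.

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Lto {
    Run, // payload (e.g. "thin") elided in this sketch
    Off,
    OnlyBitcode,
    ObjectAndBitcode,
    OnlyObject,
}

/// Combine what two dependents require from one shared dependency.
fn merge(a: Lto, b: Lto) -> Lto {
    use Lto::*;
    match (a, b) {
        // Running LTO, or explicitly turning it off, wins outright.
        (Run, _) | (_, Run) => Run,
        (Off, _) | (_, Off) => Off,
        // Both sides already agree.
        (x, y) if x == y => x,
        // One side needs bitcode, the other object code (or one already
        // needs both): the shared rlib has to carry both.
        _ => ObjectAndBitcode,
    }
}

fn main() {
    assert_eq!(merge(Lto::OnlyBitcode, Lto::OnlyObject), Lto::ObjectAndBitcode);
    assert_eq!(merge(Lto::ObjectAndBitcode, Lto::OnlyBitcode), Lto::ObjectAndBitcode);
    assert_eq!(merge(Lto::Off, Lto::OnlyBitcode), Lto::Off);
    assert_eq!(merge(Lto::Run, Lto::OnlyObject), Lto::Run);
}
```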
- fn init<'a, 'cfg>(&self, _cx: &Context<'a, 'cfg>, _unit: &Unit<'a>) {} + fn init(&self, _cx: &Context<'_, '_>, _unit: &Unit) {} /// In case of an `Err`, Cargo will not continue with the build process for /// this package. fn exec( &self, - cmd: ProcessBuilder, + cmd: &ProcessBuilder, id: PackageId, target: &Target, mode: CompileMode, @@ -72,7 +81,7 @@ /// Queried when queuing each unit of work. If it returns true, then the /// unit will always be rebuilt, independent of whether it needs to be. - fn force_rebuild(&self, _unit: &Unit<'_>) -> bool { + fn force_rebuild(&self, _unit: &Unit) -> bool { false } } @@ -85,7 +94,7 @@ impl Executor for DefaultExecutor { fn exec( &self, - cmd: ProcessBuilder, + cmd: &ProcessBuilder, _id: PackageId, _target: &Target, _mode: CompileMode, @@ -97,17 +106,17 @@ } } -fn compile<'a, 'cfg: 'a>( - cx: &mut Context<'a, 'cfg>, - jobs: &mut JobQueue<'a, 'cfg>, +fn compile<'cfg>( + cx: &mut Context<'_, 'cfg>, + jobs: &mut JobQueue<'cfg>, plan: &mut BuildPlan, - unit: &Unit<'a>, + unit: &Unit, exec: &Arc, force_rebuild: bool, ) -> CargoResult<()> { let bcx = cx.bcx; let build_plan = bcx.build_config.build_plan; - if !cx.compiled.insert(*unit) { + if !cx.compiled.insert(unit.clone()) { return Ok(()); } @@ -134,13 +143,13 @@ }; work.then(link_targets(cx, unit, false)?) } else { - let work = if cx.bcx.show_warnings(unit.pkg.package_id()) { + let work = if unit.show_warnings(bcx.config) { replay_output_cache( unit.pkg.package_id(), - unit.target, + &unit.target, cx.files().message_cache_path(unit), cx.bcx.build_config.message_format, - cx.bcx.config.shell().supports_color(), + cx.bcx.config.shell().err_supports_color(), ) } else { Work::noop() @@ -166,11 +175,7 @@ Ok(()) } -fn rustc<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - exec: &Arc, -) -> CargoResult { +fn rustc(cx: &mut Context<'_, '_>, unit: &Unit, exec: &Arc) -> CargoResult { let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?; let build_plan = cx.bcx.build_config.build_plan; @@ -191,17 +196,12 @@ // don't pass the `-l` flags. let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib()); let pass_cdylib_link_args = unit.target.is_cdylib(); - let do_rename = unit.target.allows_underscores() && !unit.mode.is_any_test(); - let real_name = unit.target.name().to_string(); - let crate_name = unit.target.crate_name(); - - // Rely on `target_filenames` iterator as source of truth rather than rederiving filestem. 
- let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() { - root.join(&crate_name) - } else { - root.join(&cx.files().file_stem(unit)) - } - .with_extension("d"); + + let dep_info_name = match cx.files().metadata(unit) { + Some(metadata) => format!("{}-{}.d", unit.target.crate_name(), metadata), + None => format!("{}.d", unit.target.crate_name()), + }; + let rustc_dep_info_loc = root.join(dep_info_name); let dep_info_loc = fingerprint::dep_info_loc(cx, unit); rustc.args(cx.bcx.rustflags_args(unit)); @@ -210,7 +210,7 @@ } let mut output_options = OutputOptions::new(cx, unit); let package_id = unit.pkg.package_id(); - let target = unit.target.clone(); + let target = Target::clone(&unit.target); let mode = unit.mode; exec.init(cx, unit); @@ -224,7 +224,8 @@ .unwrap_or_else(|| cx.bcx.config.cwd()) .to_path_buf(); let fingerprint_dir = cx.files().fingerprint_dir(unit); - let script_metadata = cx.find_build_script_metadata(*unit); + let script_metadata = cx.find_build_script_metadata(unit.clone()); + let is_local = unit.is_local(); return Ok(Work::new(move |state| { // Only at runtime have we discovered what the extra -L and -l @@ -281,7 +282,7 @@ state.build_plan(buildkey, rustc.clone(), outputs.clone()); } else { exec.exec( - rustc, + &rustc, package_id, &target, mode, @@ -292,20 +293,6 @@ .chain_err(|| format!("could not compile `{}`.", name))?; } - if do_rename && real_name != crate_name { - let dst = &outputs[0].path; - let src = dst.with_file_name( - dst.file_name() - .unwrap() - .to_str() - .unwrap() - .replace(&real_name, &crate_name), - ); - if src.exists() && src.file_name() != dst.file_name() { - fs::rename(&src, &dst).chain_err(|| format!("could not rename crate {:?}", src))?; - } - } - if rustc_dep_info_loc.exists() { fingerprint::translate_dep_info( &rustc_dep_info_loc, @@ -313,8 +300,9 @@ &cwd, &pkg_root, &target_dir, + &rustc, // Do not track source files in the fingerprint for registry dependencies. - current_id.source_id().is_path(), + is_local, ) .chain_err(|| { internal(format!( @@ -322,8 +310,9 @@ rustc_dep_info_loc.display() )) })?; - debug!("rewinding mtime of {:?} to {}", dep_info_loc, timestamp); - filetime::set_file_times(dep_info_loc, timestamp, timestamp)?; + // This mtime shift allows Cargo to detect if a source file was + // modified in the middle of the build. + paths::set_file_time_no_err(dep_info_loc, timestamp); } Ok(()) @@ -392,11 +381,7 @@ /// Link the compiled target (often of form `foo-{metadata_hash}`) to the /// final target. This must happen during both "Fresh" and "Compile". -fn link_targets<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - fresh: bool, -) -> CargoResult { +fn link_targets(cx: &mut Context<'_, '_>, unit: &Unit, fresh: bool) -> CargoResult { let bcx = cx.bcx; let outputs = cx.outputs(unit)?; let export_dir = cx.files().export_dir(); @@ -406,7 +391,7 @@ let features = unit.features.iter().map(|s| s.to_string()).collect(); let json_messages = bcx.build_config.emit_json(); let executable = cx.get_executable(unit)?; - let mut target = unit.target.clone(); + let mut target = Target::clone(&unit.target); if let TargetSourcePath::Metabuild = target.src_path() { // Give it something to serialize. 
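The `set_file_time_no_err` call above implements the "mtime shift": the dep-info file is stamped with a timestamp taken before rustc started, so a source file edited while the build was still running compares as newer and forces a rebuild next time. A sketch of the idea using the `filetime` crate (already a Cargo dependency); the path and contents below are illustrative only.

```rust
use filetime::FileTime;
use std::fs;

fn main() -> std::io::Result<()> {
    // Timestamp captured before rustc is spawned (Cargo derives it from the
    // fingerprint directory's `invoked.timestamp` file).
    let timestamp = FileTime::now();

    // ... rustc runs here; a source file might be edited while it does ...

    let dep_info = "example-dep-info.d"; // illustrative output path
    fs::write(dep_info, b"example: src/lib.rs\n")?;

    // Rewind the output's mtime to the pre-build timestamp so any mid-build
    // source edit still looks newer than this file. Cargo ignores errors here
    // (`set_file_time_no_err`); the sketch just propagates them.
    filetime::set_file_times(dep_info, timestamp, timestamp)?;
    Ok(())
}
```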
let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); @@ -533,19 +518,22 @@ search_path } -fn prepare_rustc<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - crate_types: &[&str], - unit: &Unit<'a>, +fn prepare_rustc( + cx: &mut Context<'_, '_>, + crate_types: &[CrateType], + unit: &Unit, ) -> CargoResult { let is_primary = cx.is_primary_package(unit); + let is_workspace = cx.bcx.ws.is_member(&unit.pkg); - let mut base = cx.compilation.rustc_process(unit.pkg, is_primary)?; + let mut base = cx + .compilation + .rustc_process(unit, is_primary, is_workspace)?; if cx.bcx.config.cli_unstable().jobserver_per_rustc { let client = cx.new_jobserver()?; base.inherit_jobserver(&client); base.arg("-Zjobserver-token-requests"); - assert!(cx.rustc_clients.insert(*unit, client).is_none()); + assert!(cx.rustc_clients.insert(unit.clone(), client).is_none()); } else { base.inherit_jobserver(&cx.jobserver); } @@ -554,12 +542,11 @@ Ok(base) } -fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { +fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult { let bcx = cx.bcx; - let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg, unit.target)?; + let mut rustdoc = cx.compilation.rustdoc_process(unit)?; rustdoc.inherit_jobserver(&cx.jobserver); rustdoc.arg("--crate-name").arg(&unit.target.crate_name()); - add_crate_versions_if_requested(bcx, unit, &mut rustdoc); add_path_args(bcx, unit, &mut rustdoc); add_cap_lints(bcx, unit, &mut rustdoc); @@ -582,21 +569,24 @@ add_error_format_and_color(cx, &mut rustdoc, false)?; - if let Some(args) = bcx.extra_args_for(unit) { + if let Some(args) = cx.bcx.extra_args_for(unit) { rustdoc.args(args); } build_deps_args(&mut rustdoc, cx, unit)?; + rustdoc::add_root_urls(cx, unit, &mut rustdoc)?; rustdoc.args(bcx.rustdocflags_args(unit)); + add_crate_versions_if_requested(bcx, unit, &mut rustdoc); + let name = unit.pkg.name().to_string(); let build_script_outputs = Arc::clone(&cx.build_script_outputs); let package_id = unit.pkg.package_id(); - let target = unit.target.clone(); + let target = Target::clone(&unit.target); let mut output_options = OutputOptions::new(cx, unit); let pkg_id = unit.pkg.package_id(); - let script_metadata = cx.find_build_script_metadata(*unit); + let script_metadata = cx.find_build_script_metadata(unit.clone()); Ok(Work::new(move |state| { if let Some(script_metadata) = script_metadata { @@ -628,17 +618,27 @@ fn add_crate_versions_if_requested( bcx: &BuildContext<'_, '_>, - unit: &Unit<'_>, + unit: &Unit, rustdoc: &mut ProcessBuilder, ) { - if !bcx.config.cli_unstable().crate_versions { - return; + if bcx.config.cli_unstable().crate_versions && !crate_version_flag_already_present(rustdoc) { + append_crate_version_flag(unit, rustdoc); } +} + +// The --crate-version flag could have already been passed in RUSTDOCFLAGS +// or as an extra compiler argument for rustdoc +fn crate_version_flag_already_present(rustdoc: &ProcessBuilder) -> bool { + rustdoc.get_args().iter().any(|flag| { + flag.to_str() + .map_or(false, |flag| flag.starts_with(RUSTDOC_CRATE_VERSION_FLAG)) + }) +} + +fn append_crate_version_flag(unit: &Unit, rustdoc: &mut ProcessBuilder) { rustdoc - .arg("-Z") - .arg("unstable-options") - .arg("--crate-version") - .arg(&unit.pkg.version().to_string()); + .arg(RUSTDOC_CRATE_VERSION_FLAG) + .arg(unit.pkg.version().to_string()); } // The path that we pass to rustc is actually fairly important because it will @@ -655,7 +655,7 @@ // // The first returned value here is the argument to pass to rustc, and 
the // second is the cwd that rustc should operate in. -fn path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>) -> (PathBuf, PathBuf) { +fn path_args(bcx: &BuildContext<'_, '_>, unit: &Unit) -> (PathBuf, PathBuf) { let ws_root = bcx.ws.root(); let src = match unit.target.src_path() { TargetSourcePath::Path(path) => path.to_path_buf(), @@ -670,21 +670,21 @@ (src, unit.pkg.root().to_path_buf()) } -fn add_path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { +fn add_path_args(bcx: &BuildContext<'_, '_>, unit: &Unit, cmd: &mut ProcessBuilder) { let (arg, cwd) = path_args(bcx, unit); cmd.arg(arg); cmd.cwd(cwd); } -fn add_cap_lints(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { +fn add_cap_lints(bcx: &BuildContext<'_, '_>, unit: &Unit, cmd: &mut ProcessBuilder) { // If this is an upstream dep we don't want warnings from, turn off all // lints. - if !bcx.show_warnings(unit.pkg.package_id()) { + if !unit.show_warnings(bcx.config) { cmd.arg("--cap-lints").arg("allow"); // If this is an upstream dep but we *do* want warnings, make sure that they // don't fail compilation. - } else if !unit.pkg.package_id().source_id().is_path() { + } else if !unit.is_local() { cmd.arg("--cap-lints").arg("warn"); } } @@ -719,18 +719,17 @@ Ok(()) } -fn build_base_args<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, +fn build_base_args( + cx: &mut Context<'_, '_>, cmd: &mut ProcessBuilder, - unit: &Unit<'a>, - crate_types: &[&str], + unit: &Unit, + crate_types: &[CrateType], ) -> CargoResult<()> { assert!(!unit.mode.is_run_custom_build()); let bcx = cx.bcx; let Profile { ref opt_level, - ref lto, codegen_units, debuginfo, debug_assertions, @@ -738,6 +737,7 @@ rpath, ref panic, incremental, + strip, .. } = unit.profile; let test = unit.mode.is_any_test(); @@ -754,7 +754,7 @@ if !test { for crate_type in crate_types.iter() { - cmd.arg("--crate-type").arg(crate_type); + cmd.arg("--crate-type").arg(crate_type.as_str()); } } @@ -770,7 +770,7 @@ } let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build()) - || (crate_types.contains(&"dylib") && bcx.ws.members().any(|p| p != unit.pkg)); + || (crate_types.contains(&CrateType::Dylib) && bcx.ws.members().any(|p| *p != unit.pkg)); if prefer_dynamic { cmd.arg("-C").arg("prefer-dynamic"); } @@ -783,16 +783,42 @@ cmd.arg("-C").arg(format!("panic={}", panic)); } - // Disable LTO for host builds as prefer_dynamic and it are mutually - // exclusive. - if unit.target.can_lto() && !unit.target.for_host() { - match *lto { - Lto::Bool(false) => {} - Lto::Bool(true) => { - cmd.args(&["-C", "lto"]); + match cx.lto[unit] { + lto::Lto::Run(None) => { + cmd.arg("-C").arg("lto"); + } + lto::Lto::Run(Some(s)) => { + cmd.arg("-C").arg(format!("lto={}", s)); + } + lto::Lto::Off => { + cmd.arg("-C").arg("lto=off"); + } + lto::Lto::ObjectAndBitcode => {} // this is rustc's default + lto::Lto::OnlyBitcode => { + // Note that this compiler flag, like the one below, is just an + // optimization in terms of build time. If we don't pass it then + // both object code and bitcode will show up. This is lagely just + // compat until the feature lands on stable and we can remove the + // conditional branch. 
+ if cx + .bcx + .target_data + .info(CompileKind::Host) + .supports_embed_bitcode + .unwrap() + { + cmd.arg("-Clinker-plugin-lto"); } - Lto::Named(ref s) => { - cmd.arg("-C").arg(format!("lto={}", s)); + } + lto::Lto::OnlyObject => { + if cx + .bcx + .target_data + .info(CompileKind::Host) + .supports_embed_bitcode + .unwrap() + { + cmd.arg("-Cembed-bitcode=no"); } } } @@ -807,7 +833,7 @@ cmd.arg("-C").arg(format!("debuginfo={}", debuginfo)); } - if let Some(args) = bcx.extra_args_for(unit) { + if let Some(args) = cx.bcx.extra_args_for(unit) { cmd.args(args); } @@ -893,6 +919,10 @@ opt(cmd, "-C", "incremental=", Some(dir)); } + if strip != Strip::None { + cmd.arg("-Z").arg(format!("strip={}", strip)); + } + if unit.is_std { // -Zforce-unstable-if-unmarked prevents the accidental use of // unstable crates within the sysroot (such as "extern crate libc" or @@ -922,10 +952,10 @@ Ok(()) } -fn build_deps_args<'a, 'cfg>( +fn build_deps_args( cmd: &mut ProcessBuilder, - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, + cx: &mut Context<'_, '_>, + unit: &Unit, ) -> CargoResult<()> { let bcx = cx.bcx; cmd.arg("-L").arg(&{ @@ -951,7 +981,7 @@ // error in the future (see PR #4797). if !deps .iter() - .any(|dep| !dep.unit.mode.is_doc() && dep.unit.target.linkable()) + .any(|dep| !dep.unit.mode.is_doc() && dep.unit.target.is_linkable()) { if let Some(dep) = deps .iter() @@ -991,73 +1021,70 @@ } /// Generates a list of `--extern` arguments. -pub fn extern_args<'a>( - cx: &Context<'a, '_>, - unit: &Unit<'a>, +pub fn extern_args( + cx: &Context<'_, '_>, + unit: &Unit, unstable_opts: &mut bool, ) -> CargoResult> { let mut result = Vec::new(); let deps = cx.unit_deps(unit); // Closure to add one dependency to `result`. - let mut link_to = |dep: &UnitDep<'a>, - extern_crate_name: InternedString, - noprelude: bool| - -> CargoResult<()> { - let mut value = OsString::new(); - let mut opts = Vec::new(); - if unit - .pkg - .manifest() - .features() - .require(Feature::public_dependency()) - .is_ok() - && !dep.public - { - opts.push("priv"); - *unstable_opts = true; - } - if noprelude { - opts.push("noprelude"); - *unstable_opts = true; - } - if !opts.is_empty() { - value.push(opts.join(",")); - value.push(":"); - } - value.push(extern_crate_name.as_str()); - value.push("="); - - let mut pass = |file| { - let mut value = value.clone(); - value.push(file); - result.push(OsString::from("--extern")); - result.push(value); - }; + let mut link_to = + |dep: &UnitDep, extern_crate_name: InternedString, noprelude: bool| -> CargoResult<()> { + let mut value = OsString::new(); + let mut opts = Vec::new(); + if unit + .pkg + .manifest() + .features() + .require(Feature::public_dependency()) + .is_ok() + && !dep.public + { + opts.push("priv"); + *unstable_opts = true; + } + if noprelude { + opts.push("noprelude"); + *unstable_opts = true; + } + if !opts.is_empty() { + value.push(opts.join(",")); + value.push(":"); + } + value.push(extern_crate_name.as_str()); + value.push("="); + + let mut pass = |file| { + let mut value = value.clone(); + value.push(file); + result.push(OsString::from("--extern")); + result.push(value); + }; - let outputs = cx.outputs(&dep.unit)?; - let mut outputs = outputs.iter().filter_map(|output| match output.flavor { - FileFlavor::Linkable { rmeta } => Some((output, rmeta)), - _ => None, - }); + let outputs = cx.outputs(&dep.unit)?; - if cx.only_requires_rmeta(unit, &dep.unit) { - let (output, _rmeta) = outputs - .find(|(_output, rmeta)| *rmeta) - .expect("failed to find rlib dep for pipelined dep"); - 
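`link_to` above assembles each `--extern` value as optional comma-separated options, a colon, then `name=path`. A standalone sketch of that formatting with made-up crate names and paths; each value is passed to rustc right after a separate `--extern` argument, and the `priv` option is only emitted when the nightly `public-dependency` feature is active.

```rust
fn extern_arg(name: &str, path: &str, public: bool, noprelude: bool) -> String {
    let mut opts = Vec::new();
    if !public {
        opts.push("priv"); // nightly `public-dependency` feature
    }
    if noprelude {
        opts.push("noprelude"); // used for `-Zbuild-std` dependencies
    }
    let prefix = if opts.is_empty() {
        String::new()
    } else {
        format!("{}:", opts.join(","))
    };
    format!("{}{}={}", prefix, name, path)
}

fn main() {
    assert_eq!(
        extern_arg("serde", "/deps/libserde-abc123.rmeta", true, false),
        "serde=/deps/libserde-abc123.rmeta"
    );
    assert_eq!(
        extern_arg("core", "/sysroot-deps/libcore.rlib", true, true),
        "noprelude:core=/sysroot-deps/libcore.rlib"
    );
}
```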
pass(&output.path); - } else { - for (output, rmeta) in outputs { - if !rmeta { - pass(&output.path); + if cx.only_requires_rmeta(unit, &dep.unit) || dep.unit.mode.is_check() { + // Example: rlib dependency for an rlib, rmeta is all that is required. + let output = outputs + .iter() + .find(|output| output.flavor == FileFlavor::Rmeta) + .expect("failed to find rmeta dep for pipelined dep"); + pass(&output.path); + } else { + // Example: a bin needs `rlib` for dependencies, it cannot use rmeta. + for output in outputs.iter() { + if output.flavor == FileFlavor::Linkable { + pass(&output.path); + } } } - } - Ok(()) - }; + Ok(()) + }; for dep in deps { - if dep.unit.target.linkable() && !dep.unit.mode.is_doc() { + if dep.unit.target.is_linkable() && !dep.unit.mode.is_doc() { link_to(dep, dep.extern_crate_name, dep.noprelude)?; } } @@ -1093,9 +1120,9 @@ } impl OutputOptions { - fn new<'a>(cx: &Context<'a, '_>, unit: &Unit<'a>) -> OutputOptions { + fn new(cx: &Context<'_, '_>, unit: &Unit) -> OutputOptions { let look_for_metadata_directive = cx.rmeta_required(unit); - let color = cx.bcx.config.shell().supports_color(); + let color = cx.bcx.config.shell().err_supports_color(); let path = cx.files().message_cache_path(unit); // Remove old cache, ignore ENOENT, which is the common case. drop(fs::remove_file(&path)); @@ -1130,7 +1157,7 @@ // Check if caching is enabled. if let Some((path, cell)) = &mut options.cache_cell { // Cache the output, which will be replayed later when Fresh. - let f = cell.try_borrow_mut_with(|| File::create(path))?; + let f = cell.try_borrow_mut_with(|| paths::create(path))?; debug_assert!(!line.contains('\n')); f.write_all(line.as_bytes())?; f.write_all(&[b'\n'])?; @@ -1318,7 +1345,7 @@ // We sometimes have gigabytes of output from the compiler, so avoid // loading it all into memory at once, as that can cause OOM where // otherwise there would be none. - let file = fs::File::open(&path)?; + let file = paths::open(&path)?; let mut reader = std::io::BufReader::new(file); let mut line = String::new(); loop { diff -Nru cargo-0.44.1/src/cargo/core/compiler/output_depinfo.rs cargo-0.47.0/src/cargo/core/compiler/output_depinfo.rs --- cargo-0.44.1/src/cargo/core/compiler/output_depinfo.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/output_depinfo.rs 2020-07-17 20:39:39.000000000 +0000 @@ -23,7 +23,6 @@ //! be detected via changes to `Cargo.lock`. use std::collections::{BTreeSet, HashSet}; -use std::fs::File; use std::io::{BufWriter, Write}; use std::path::{Path, PathBuf}; @@ -48,13 +47,13 @@ .map(|f| f.replace(" ", "\\ ")) } -fn add_deps_for_unit<'a, 'b>( +fn add_deps_for_unit( deps: &mut BTreeSet, - cx: &mut Context<'a, 'b>, - unit: &Unit<'a>, - visited: &mut HashSet>, + cx: &mut Context<'_, '_>, + unit: &Unit, + visited: &mut HashSet, ) -> CargoResult<()> { - if !visited.insert(*unit) { + if !visited.insert(unit.clone()) { return Ok(()); } @@ -66,7 +65,7 @@ if let Some(paths) = fingerprint::parse_dep_info(unit.pkg.root(), cx.files().host_root(), &dep_info_loc)? { - for path in paths { + for path in paths.files { deps.insert(path); } } else { @@ -80,7 +79,7 @@ } // Add rerun-if-changed dependencies - if let Some(metadata) = cx.find_build_script_metadata(*unit) { + if let Some(metadata) = cx.find_build_script_metadata(unit.clone()) { if let Some(output) = cx .build_script_outputs .lock() @@ -96,8 +95,7 @@ // Recursively traverse all transitive dependencies let unit_deps = Vec::from(cx.unit_deps(unit)); // Create vec due to mutable borrow. 
for dep in unit_deps { - let source_id = dep.unit.pkg.package_id().source_id(); - if source_id.is_path() { + if unit.is_local() { add_deps_for_unit(deps, cx, &dep.unit, visited)?; } } @@ -107,7 +105,7 @@ /// Save a `.d` dep-info file for the given unit. /// /// This only saves files for uplifted artifacts. -pub fn output_depinfo<'a, 'b>(cx: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> { +pub fn output_depinfo(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<()> { let bcx = cx.bcx; let mut deps = BTreeSet::new(); let mut visited = HashSet::new(); @@ -133,7 +131,7 @@ for output in cx .outputs(unit)? .iter() - .filter(|o| o.flavor != FileFlavor::DebugInfo) + .filter(|o| !matches!(o.flavor, FileFlavor::DebugInfo | FileFlavor::Auxiliary)) { if let Some(ref link_dst) = output.hardlink { let output_path = link_dst.with_extension("d"); @@ -143,13 +141,13 @@ // If nothing changed don't recreate the file which could alter // its mtime if let Ok(previous) = fingerprint::parse_rustc_dep_info(&output_path) { - if previous.len() == 1 && previous[0].0 == target_fn && previous[0].1 == deps { + if previous.files.iter().eq(deps.iter().map(Path::new)) { continue; } } // Otherwise write it all out - let mut outfile = BufWriter::new(File::create(output_path)?); + let mut outfile = BufWriter::new(paths::create(output_path)?); write!(outfile, "{}:", target_fn)?; for dep in &deps { write!(outfile, " {}", dep)?; diff -Nru cargo-0.44.1/src/cargo/core/compiler/rustdoc.rs cargo-0.47.0/src/cargo/core/compiler/rustdoc.rs --- cargo-0.44.1/src/cargo/core/compiler/rustdoc.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/rustdoc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,172 @@ +//! Utilities for building with rustdoc. + +use crate::core::compiler::context::Context; +use crate::core::compiler::unit::Unit; +use crate::core::compiler::CompileKind; +use crate::sources::CRATES_IO_REGISTRY; +use crate::util::errors::{internal, CargoResult}; +use crate::util::ProcessBuilder; +use std::collections::HashMap; +use std::fmt; +use std::hash; +use url::Url; + +/// Mode used for `std`. +#[derive(Debug, Hash)] +pub enum RustdocExternMode { + /// Use a local `file://` URL. + Local, + /// Use a remote URL to https://doc.rust-lang.org/ (default). + Remote, + /// An arbitrary URL. 
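`output_depinfo` above writes a Makefile-style `.d` file: a single `<output>: <deps>` line whose paths have literal spaces escaped as `\ `. A small self-contained sketch of that format with illustrative paths.

```rust
use std::fs::File;
use std::io::{BufWriter, Write};

fn escape(path: &str) -> String {
    path.replace(' ', "\\ ")
}

fn main() -> std::io::Result<()> {
    let target = "target/debug/example";
    let deps = ["src/main.rs", "src/some module.rs"];

    let mut out = BufWriter::new(File::create("example.d")?);
    write!(out, "{}:", escape(target))?;
    for dep in &deps {
        write!(out, " {}", escape(dep))?;
    }
    writeln!(out)?;
    Ok(())
}
```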
+ Url(String), +} + +impl From for RustdocExternMode { + fn from(s: String) -> RustdocExternMode { + match s.as_ref() { + "local" => RustdocExternMode::Local, + "remote" => RustdocExternMode::Remote, + _ => RustdocExternMode::Url(s), + } + } +} + +impl fmt::Display for RustdocExternMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RustdocExternMode::Local => "local".fmt(f), + RustdocExternMode::Remote => "remote".fmt(f), + RustdocExternMode::Url(s) => s.fmt(f), + } + } +} + +impl<'de> serde::de::Deserialize<'de> for RustdocExternMode { + fn deserialize(deserializer: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Ok(s.into()) + } +} + +#[derive(serde::Deserialize, Debug)] +pub struct RustdocExternMap { + registries: HashMap, + std: Option, +} + +impl hash::Hash for RustdocExternMap { + fn hash(&self, into: &mut H) { + self.std.hash(into); + for (key, value) in &self.registries { + key.hash(into); + value.hash(into); + } + } +} + +pub fn add_root_urls( + cx: &Context<'_, '_>, + unit: &Unit, + rustdoc: &mut ProcessBuilder, +) -> CargoResult<()> { + let config = cx.bcx.config; + if !config.cli_unstable().rustdoc_map { + log::debug!("`doc.extern-map` ignored, requires -Zrustdoc-map flag"); + return Ok(()); + } + let map = config.doc_extern_map()?; + if map.registries.is_empty() && map.std.is_none() { + // Skip doing unnecessary work. + return Ok(()); + } + let mut unstable_opts = false; + // Collect mapping of registry name -> index url. + let name2url: HashMap<&String, Url> = map + .registries + .keys() + .filter_map(|name| { + if let Ok(index_url) = config.get_registry_index(name) { + Some((name, index_url)) + } else { + log::warn!( + "`doc.extern-map.{}` specifies a registry that is not defined", + name + ); + None + } + }) + .collect(); + for dep in cx.unit_deps(unit) { + if dep.unit.target.is_linkable() && !dep.unit.mode.is_doc() { + for (registry, location) in &map.registries { + let sid = dep.unit.pkg.package_id().source_id(); + let matches_registry = || -> bool { + if !sid.is_registry() { + return false; + } + if sid.is_default_registry() { + return registry == CRATES_IO_REGISTRY; + } + if let Some(index_url) = name2url.get(registry) { + return index_url == sid.url(); + } + false + }; + if matches_registry() { + let mut url = location.clone(); + if !url.contains("{pkg_name}") && !url.contains("{version}") { + if !url.ends_with('/') { + url.push('/'); + } + url.push_str("{pkg_name}/{version}/"); + } + let url = url + .replace("{pkg_name}", &dep.unit.pkg.name()) + .replace("{version}", &dep.unit.pkg.version().to_string()); + rustdoc.arg("--extern-html-root-url"); + rustdoc.arg(format!("{}={}", dep.unit.target.crate_name(), url)); + unstable_opts = true; + } + } + } + } + let std_url = match &map.std { + None | Some(RustdocExternMode::Remote) => None, + Some(RustdocExternMode::Local) => { + let sysroot = &cx.bcx.target_data.info(CompileKind::Host).sysroot; + let html_root = sysroot.join("share").join("doc").join("rust").join("html"); + if html_root.exists() { + let url = Url::from_file_path(&html_root).map_err(|()| { + internal(format!( + "`{}` failed to convert to URL", + html_root.display() + )) + })?; + Some(url.to_string()) + } else { + log::warn!( + "`doc.extern-map.std` is \"local\", but local docs don't appear to exist at {}", + html_root.display() + ); + None + } + } + Some(RustdocExternMode::Url(s)) => Some(s.to_string()), + }; + if let Some(url) = std_url { + for name in &["std", 
"core", "alloc", "proc_macro"] { + rustdoc.arg("--extern-html-root-url"); + rustdoc.arg(format!("{}={}", name, url)); + unstable_opts = true; + } + } + + if unstable_opts { + rustdoc.arg("-Zunstable-options"); + } + Ok(()) +} diff -Nru cargo-0.44.1/src/cargo/core/compiler/standard_lib.rs cargo-0.47.0/src/cargo/core/compiler/standard_lib.rs --- cargo-0.44.1/src/cargo/core/compiler/standard_lib.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/standard_lib.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,8 @@ //! Code for building the standard library. -use crate::core::compiler::{BuildContext, CompileKind, CompileMode, RustcTargetData, Unit}; -use crate::core::profiles::UnitFor; +use crate::core::compiler::UnitInterner; +use crate::core::compiler::{CompileKind, CompileMode, RustcTargetData, Unit}; +use crate::core::profiles::{Profiles, UnitFor}; use crate::core::resolver::features::{FeaturesFor, ResolvedFeatures}; use crate::core::resolver::{HasDevUnits, ResolveOpts}; use crate::core::{Dependency, PackageId, PackageSet, Resolve, SourceId, Workspace}; @@ -33,7 +34,7 @@ pub fn resolve_std<'cfg>( ws: &Workspace<'cfg>, target_data: &RustcTargetData, - requested_target: CompileKind, + requested_targets: &[CompileKind], crates: &[String], ) -> CargoResult<(PackageSet<'cfg>, Resolve, ResolvedFeatures)> { let src_path = detect_sysroot_src_path(target_data)?; @@ -64,6 +65,7 @@ &Some(members), /*default_members*/ &None, /*exclude*/ &None, + /*custom_metadata*/ &None, )); let virtual_manifest = crate::core::VirtualManifest::new( /*replace*/ Vec::new(), @@ -71,6 +73,7 @@ ws_config, /*profiles*/ None, crate::core::Features::default(), + None, ); let config = ws.config(); @@ -105,10 +108,11 @@ let resolve = ops::resolve_ws_with_opts( &std_ws, target_data, - requested_target, + requested_targets, &opts, &specs, HasDevUnits::No, + crate::core::resolver::features::ForceAllTargets::No, )?; Ok(( resolve.pkg_set, @@ -120,47 +124,58 @@ /// Generate a list of root `Unit`s for the standard library. /// /// The given slice of crate names is the root set. -pub fn generate_std_roots<'a>( - bcx: &BuildContext<'a, '_>, +pub fn generate_std_roots( crates: &[String], - std_resolve: &'a Resolve, + std_resolve: &Resolve, std_features: &ResolvedFeatures, - kind: CompileKind, -) -> CargoResult>> { + kinds: &[CompileKind], + package_set: &PackageSet<'_>, + interner: &UnitInterner, + profiles: &Profiles, +) -> CargoResult>> { // Generate the root Units for the standard library. let std_ids = crates .iter() .map(|crate_name| std_resolve.query(crate_name)) .collect::>>()?; // Convert PackageId to Package. - let std_pkgs = bcx.packages.get_many(std_ids)?; - // Generate a list of Units. - std_pkgs - .into_iter() - .map(|pkg| { - let lib = pkg - .targets() - .iter() - .find(|t| t.is_lib()) - .expect("std has a lib"); - let unit_for = UnitFor::new_normal(); - // I don't think we need to bother with Check here, the difference - // in time is minimal, and the difference in caching is - // significant. - let mode = CompileMode::Build; - let profile = bcx.profiles.get_profile( - pkg.package_id(), - /*is_member*/ false, - unit_for, + let std_pkgs = package_set.get_many(std_ids)?; + // Generate a map of Units for each kind requested. 
+ let mut ret = HashMap::new(); + for pkg in std_pkgs { + let lib = pkg + .targets() + .iter() + .find(|t| t.is_lib()) + .expect("std has a lib"); + let unit_for = UnitFor::new_normal(); + // I don't think we need to bother with Check here, the difference + // in time is minimal, and the difference in caching is + // significant. + let mode = CompileMode::Build; + let profile = profiles.get_profile( + pkg.package_id(), + /*is_member*/ false, + /*is_local*/ false, + unit_for, + mode, + ); + let features = std_features.activated_features(pkg.package_id(), FeaturesFor::NormalOrDev); + + for kind in kinds { + let list = ret.entry(*kind).or_insert_with(Vec::new); + list.push(interner.intern( + pkg, + lib, + profile, + *kind, mode, - ); - let features = - std_features.activated_features(pkg.package_id(), FeaturesFor::NormalOrDev); - Ok(bcx.units.intern( - pkg, lib, profile, kind, mode, features, /*is_std*/ true, - )) - }) - .collect::>>() + features.clone(), + /*is_std*/ true, + )); + } + } + Ok(ret) } fn detect_sysroot_src_path(target_data: &RustcTargetData) -> CargoResult { diff -Nru cargo-0.44.1/src/cargo/core/compiler/timings.rs cargo-0.47.0/src/cargo/core/compiler/timings.rs --- cargo-0.44.1/src/cargo/core/compiler/timings.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/timings.rs 2020-07-17 20:39:39.000000000 +0000 @@ -8,13 +8,12 @@ use crate::core::PackageId; use crate::util::cpu::State; use crate::util::machine_message::{self, Message}; -use crate::util::{paths, CargoResult, Config}; +use crate::util::{paths, CargoResult, CargoResultExt, Config}; use std::collections::HashMap; -use std::fs::File; use std::io::{BufWriter, Write}; use std::time::{Duration, Instant, SystemTime}; -pub struct Timings<'a, 'cfg> { +pub struct Timings<'cfg> { config: &'cfg Config, /// Whether or not timings should be captured. enabled: bool, @@ -39,10 +38,10 @@ /// Total number of dirty units. total_dirty: u32, /// Time tracking for each individual unit. - unit_times: Vec>, + unit_times: Vec, /// Units that are in the process of being built. /// When they finished, they are moved to `unit_times`. - active: HashMap>, + active: HashMap, /// Concurrency-tracking information. This is periodically updated while /// compilation progresses. concurrency: Vec, @@ -56,8 +55,8 @@ } /// Tracking information for an individual unit. -struct UnitTime<'a> { - unit: Unit<'a>, +struct UnitTime { + unit: Unit, /// A string describing the cargo target. target: String, /// The time when this unit started as an offset in seconds from `Timings::start`. @@ -68,9 +67,9 @@ /// from `start`. rmeta_time: Option, /// Reverse deps that are freed to run after this unit finished. - unlocked_units: Vec>, + unlocked_units: Vec, /// Same as `unlocked_units`, but unlocked by rmeta. - unlocked_rmeta_units: Vec>, + unlocked_rmeta_units: Vec, } /// Periodic concurrency tracking information. 
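The `-Zrustdoc-map` support in the rustdoc.rs hunk above turns a registry entry such as `https://docs.rs/` into one `--extern-html-root-url` per dependency, appending a default `{pkg_name}/{version}/` template when the configured URL has no placeholders. A sketch of that expansion; it assumes the crate name equals the package name, which the real code does not.

```rust
fn extern_html_root_url(template: &str, crate_name: &str, version: &str) -> String {
    let mut url = template.to_string();
    // If the template has no placeholders, default to "<pkg>/<version>/".
    if !url.contains("{pkg_name}") && !url.contains("{version}") {
        if !url.ends_with('/') {
            url.push('/');
        }
        url.push_str("{pkg_name}/{version}/");
    }
    let url = url
        .replace("{pkg_name}", crate_name)
        .replace("{version}", version);
    // The whole value follows a separate `--extern-html-root-url` argument.
    format!("{}={}", crate_name, url)
}

fn main() {
    assert_eq!(
        extern_html_root_url("https://docs.rs/", "serde", "1.0.110"),
        "serde=https://docs.rs/serde/1.0.110/"
    );
}
```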
@@ -91,8 +90,8 @@ rustc_parallelism: usize, } -impl<'a, 'cfg> Timings<'a, 'cfg> { - pub fn new(bcx: &BuildContext<'a, 'cfg>, root_units: &[Unit<'_>]) -> Timings<'a, 'cfg> { +impl<'cfg> Timings<'cfg> { + pub fn new(bcx: &BuildContext<'_, 'cfg>, root_units: &[Unit]) -> Timings<'cfg> { let has_report = |what| { bcx.config .cli_unstable() @@ -122,6 +121,17 @@ .collect(); let start_str = humantime::format_rfc3339_seconds(SystemTime::now()).to_string(); let profile = bcx.build_config.requested_profile.to_string(); + let last_cpu_state = if enabled { + match State::current() { + Ok(state) => Some(state), + Err(e) => { + log::info!("failed to get CPU state, CPU tracking disabled: {:?}", e); + None + } + } + } else { + None + }; Timings { config: bcx.config, @@ -138,14 +148,14 @@ unit_times: Vec::new(), active: HashMap::new(), concurrency: Vec::new(), - last_cpu_state: if enabled { State::current().ok() } else { None }, + last_cpu_state, last_cpu_recording: Instant::now(), cpu_usage: Vec::new(), } } /// Mark that a unit has started running. - pub fn unit_start(&mut self, id: JobId, unit: Unit<'a>) { + pub fn unit_start(&mut self, id: JobId, unit: Unit) { if !self.enabled { return; } @@ -179,7 +189,7 @@ } /// Mark that the `.rmeta` file as generated. - pub fn unit_rmeta_finished(&mut self, id: JobId, unlocked: Vec<&Unit<'a>>) { + pub fn unit_rmeta_finished(&mut self, id: JobId, unlocked: Vec<&Unit>) { if !self.enabled { return; } @@ -193,11 +203,13 @@ let t = d_as_f64(self.start.elapsed()); unit_time.rmeta_time = Some(t - unit_time.start); assert!(unit_time.unlocked_rmeta_units.is_empty()); - unit_time.unlocked_rmeta_units.extend(unlocked); + unit_time + .unlocked_rmeta_units + .extend(unlocked.iter().cloned().cloned()); } /// Mark that a unit has finished running. 
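The `unlocked.iter().cloned().cloned()` calls above convert a `Vec<&Unit>` into owned `Unit`s now that `Unit` is no longer `Copy`: iterating a `Vec<&T>` by reference yields `&&T`, so the first `cloned()` strips the outer reference and the second clones the value itself. A tiny stand-alone illustration:

```rust
#[derive(Clone, Debug, PartialEq)]
struct Unit {
    name: String,
}

fn main() {
    let a = Unit { name: "a".into() };
    let b = Unit { name: "b".into() };
    let unlocked: Vec<&Unit> = vec![&a, &b];

    // `.iter()` over `Vec<&Unit>` yields `&&Unit`; the first `cloned()`
    // copies the inner reference, the second clones the `Unit` itself.
    let owned: Vec<Unit> = unlocked.iter().cloned().cloned().collect();

    assert_eq!(owned, vec![a.clone(), b.clone()]);
    println!("{:?}", owned);
}
```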
- pub fn unit_finished(&mut self, id: JobId, unlocked: Vec<&Unit<'a>>) { + pub fn unit_finished(&mut self, id: JobId, unlocked: Vec<&Unit>) { if !self.enabled { return; } @@ -209,7 +221,9 @@ let t = d_as_f64(self.start.elapsed()); unit_time.duration = t - unit_time.start; assert!(unit_time.unlocked_units.is_empty()); - unit_time.unlocked_units.extend(unlocked); + unit_time + .unlocked_units + .extend(unlocked.iter().cloned().cloned()); if self.report_info { let msg = format!( "{}{} in {:.1}s", @@ -225,13 +239,13 @@ if self.report_json { let msg = machine_message::TimingInfo { package_id: unit_time.unit.pkg.package_id(), - target: unit_time.unit.target, + target: &unit_time.unit.target, mode: unit_time.unit.mode, duration: unit_time.duration, rmeta_time: unit_time.rmeta_time, } .to_json_string(); - self.config.shell().stdout_println(msg); + crate::drop_println!(self.config, "{}", msg); } self.unit_times.push(unit_time); } @@ -283,7 +297,10 @@ } let current = match State::current() { Ok(s) => s, - Err(_) => return, + Err(e) => { + log::info!("failed to get CPU state: {:?}", e); + return; + } }; let pct_idle = current.idle_since(prev); *prev = current; @@ -305,7 +322,8 @@ self.unit_times .sort_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap()); if self.report_html { - self.report_html(bcx, error)?; + self.report_html(bcx, error) + .chain_err(|| "failed to save timing report")?; } Ok(()) } @@ -319,7 +337,7 @@ let duration = d_as_f64(self.start.elapsed()); let timestamp = self.start_str.replace(&['-', ':'][..], ""); let filename = format!("cargo-timing-{}.html", timestamp); - let mut f = BufWriter::new(File::create(&filename)?); + let mut f = BufWriter::new(paths::create(&filename)?); let roots: Vec<&str> = self .root_targets .iter() @@ -456,11 +474,11 @@ fn write_js_data(&self, f: &mut impl Write) -> CargoResult<()> { // Create a map to link indices of unlocked units. - let unit_map: HashMap, usize> = self + let unit_map: HashMap = self .unit_times .iter() .enumerate() - .map(|(i, ut)| (ut.unit, i)) + .map(|(i, ut)| (ut.unit.clone(), i)) .collect(); #[derive(serde::Serialize)] struct UnitData { @@ -551,7 +569,7 @@ "# )?; - let mut units: Vec<&UnitTime<'_>> = self.unit_times.iter().collect(); + let mut units: Vec<&UnitTime> = self.unit_times.iter().collect(); units.sort_unstable_by(|a, b| b.duration.partial_cmp(&a.duration).unwrap()); for (i, unit) in units.iter().enumerate() { let codegen = match unit.codegen_time() { @@ -583,7 +601,7 @@ } } -impl<'a> UnitTime<'a> { +impl UnitTime { /// Returns the codegen time as (rmeta_time, codegen_time, percent of total) fn codegen_time(&self) -> Option<(f64, f64, f64)> { self.rmeta_time.map(|rmeta_time| { @@ -610,7 +628,13 @@ .lines() .next() .expect("rustc version"); - let requested_target = bcx.target_data.short_name(&bcx.build_config.requested_kind); + let requested_target = bcx + .build_config + .requested_kinds + .iter() + .map(|kind| bcx.target_data.short_name(kind)) + .collect::>() + .join(", "); format!( "{}
<br>Host: {}<br>
Target: {}", version, diff -Nru cargo-0.44.1/src/cargo/core/compiler/unit_dependencies.rs cargo-0.47.0/src/cargo/core/compiler/unit_dependencies.rs --- cargo-0.44.1/src/cargo/core/compiler/unit_dependencies.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/unit_dependencies.rs 2020-07-17 20:39:39.000000000 +0000 @@ -15,44 +15,27 @@ //! (for example, with and without tests), so we actually build a dependency //! graph of `Unit`s, which capture these properties. -use crate::core::compiler::Unit; -use crate::core::compiler::{BuildContext, CompileKind, CompileMode}; +use crate::core::compiler::unit_graph::{UnitDep, UnitGraph}; +use crate::core::compiler::UnitInterner; +use crate::core::compiler::{CompileKind, CompileMode, RustcTargetData, Unit}; use crate::core::dependency::DepKind; -use crate::core::package::Downloads; -use crate::core::profiles::{Profile, UnitFor}; +use crate::core::profiles::{Profile, Profiles, UnitFor}; use crate::core::resolver::features::{FeaturesFor, ResolvedFeatures}; use crate::core::resolver::Resolve; -use crate::core::{InternedString, Package, PackageId, Target}; +use crate::core::{Package, PackageId, PackageSet, Target, Workspace}; +use crate::ops::resolve_all_features; +use crate::util::interning::InternedString; +use crate::util::Config; use crate::CargoResult; use log::trace; use std::collections::{HashMap, HashSet}; -/// The dependency graph of Units. -pub type UnitGraph<'a> = HashMap, Vec>>; - -/// A unit dependency. -#[derive(Debug, Clone, Hash, Eq, PartialEq, PartialOrd, Ord)] -pub struct UnitDep<'a> { - /// The dependency unit. - pub unit: Unit<'a>, - /// The purpose of this dependency (a dependency for a test, or a build - /// script, etc.). - pub unit_for: UnitFor, - /// The name the parent uses to refer to this dependency. - pub extern_crate_name: InternedString, - /// Whether or not this is a public dependency. - pub public: bool, - /// If `true`, the dependency should not be added to Rust's prelude. - pub noprelude: bool, -} - /// Collection of stuff used while creating the `UnitGraph`. struct State<'a, 'cfg> { - bcx: &'a BuildContext<'a, 'cfg>, - waiting_on_download: HashSet, - downloads: Downloads<'a, 'cfg>, - unit_dependencies: UnitGraph<'a>, - package_cache: HashMap, + ws: &'a Workspace<'cfg>, + config: &'cfg Config, + unit_dependencies: UnitGraph, + package_set: &'a PackageSet<'cfg>, usr_resolve: &'a Resolve, usr_features: &'a ResolvedFeatures, std_resolve: Option<&'a Resolve>, @@ -60,31 +43,43 @@ /// This flag is `true` while generating the dependencies for the standard /// library. 
is_std: bool, + global_mode: CompileMode, + target_data: &'a RustcTargetData, + profiles: &'a Profiles, + interner: &'a UnitInterner, } pub fn build_unit_dependencies<'a, 'cfg>( - bcx: &'a BuildContext<'a, 'cfg>, + ws: &'a Workspace<'cfg>, + package_set: &'a PackageSet<'cfg>, resolve: &'a Resolve, features: &'a ResolvedFeatures, std_resolve: Option<&'a (Resolve, ResolvedFeatures)>, - roots: &[Unit<'a>], - std_roots: &[Unit<'a>], -) -> CargoResult> { + roots: &[Unit], + std_roots: &HashMap>, + global_mode: CompileMode, + target_data: &'a RustcTargetData, + profiles: &'a Profiles, + interner: &'a UnitInterner, +) -> CargoResult { let (std_resolve, std_features) = match std_resolve { Some((r, f)) => (Some(r), Some(f)), None => (None, None), }; let mut state = State { - bcx, - downloads: bcx.packages.enable_download()?, - waiting_on_download: HashSet::new(), + ws, + config: ws.config(), unit_dependencies: HashMap::new(), - package_cache: HashMap::new(), + package_set, usr_resolve: resolve, usr_features: features, std_resolve, std_features, is_std: false, + global_mode, + target_data, + profiles, + interner, }; let std_unit_deps = calc_deps_of_std(&mut state, std_roots)?; @@ -112,34 +107,33 @@ } /// Compute all the dependencies for the standard library. -fn calc_deps_of_std<'a, 'cfg>( - mut state: &mut State<'a, 'cfg>, - std_roots: &[Unit<'a>], -) -> CargoResult>> { +fn calc_deps_of_std( + mut state: &mut State<'_, '_>, + std_roots: &HashMap>, +) -> CargoResult> { if std_roots.is_empty() { return Ok(None); } // Compute dependencies for the standard library. state.is_std = true; - deps_of_roots(std_roots, &mut state)?; + for roots in std_roots.values() { + deps_of_roots(roots, &mut state)?; + } state.is_std = false; - Ok(Some(std::mem::replace( - &mut state.unit_dependencies, - HashMap::new(), - ))) + Ok(Some(std::mem::take(&mut state.unit_dependencies))) } /// Add the standard library units to the `unit_dependencies`. -fn attach_std_deps<'a, 'cfg>( - state: &mut State<'a, 'cfg>, - std_roots: &[Unit<'a>], - std_unit_deps: UnitGraph<'a>, +fn attach_std_deps( + state: &mut State<'_, '_>, + std_roots: &HashMap>, + std_unit_deps: UnitGraph, ) { // Attach the standard library as a dependency of every target unit. for (unit, deps) in state.unit_dependencies.iter_mut() { if !unit.kind.is_host() && !unit.mode.is_run_custom_build() { - deps.extend(std_roots.iter().map(|unit| UnitDep { - unit: *unit, + deps.extend(std_roots[&unit.kind].iter().map(|unit| UnitDep { + unit: unit.clone(), unit_for: UnitFor::new_normal(), extern_crate_name: unit.pkg.name(), // TODO: Does this `public` make sense? @@ -158,52 +152,36 @@ /// Compute all the dependencies of the given root units. /// The result is stored in state.unit_dependencies. -fn deps_of_roots<'a, 'cfg>(roots: &[Unit<'a>], mut state: &mut State<'a, 'cfg>) -> CargoResult<()> { - // Loop because we are downloading while building the dependency graph. - // The partially-built unit graph is discarded through each pass of the - // loop because it is incomplete because not all required Packages have - // been downloaded. - loop { - for unit in roots.iter() { - state.get(unit.pkg.package_id())?; - - // Dependencies of tests/benches should not have `panic` set. - // We check the global test mode to see if we are running in `cargo - // test` in which case we ensure all dependencies have `panic` - // cleared, and avoid building the lib thrice (once with `panic`, once - // without, once for `--test`). 
In particular, the lib included for - // Doc tests and examples are `Build` mode here. - let unit_for = if unit.mode.is_any_test() || state.bcx.build_config.test() { - UnitFor::new_test(state.bcx.config) - } else if unit.target.is_custom_build() { - // This normally doesn't happen, except `clean` aggressively - // generates all units. - UnitFor::new_build(false) - } else if unit.target.for_host() { - // Proc macro / plugin should never have panic set. - UnitFor::new_compiler() - } else { - UnitFor::new_normal() - }; - deps_of(unit, &mut state, unit_for)?; - } - - if !state.waiting_on_download.is_empty() { - state.finish_some_downloads()?; - state.unit_dependencies.clear(); +fn deps_of_roots(roots: &[Unit], mut state: &mut State<'_, '_>) -> CargoResult<()> { + for unit in roots.iter() { + // Dependencies of tests/benches should not have `panic` set. + // We check the global test mode to see if we are running in `cargo + // test` in which case we ensure all dependencies have `panic` + // cleared, and avoid building the lib thrice (once with `panic`, once + // without, once for `--test`). In particular, the lib included for + // Doc tests and examples are `Build` mode here. + let unit_for = if unit.mode.is_any_test() || state.global_mode.is_rustc_test() { + UnitFor::new_test(state.config) + } else if unit.target.is_custom_build() { + // This normally doesn't happen, except `clean` aggressively + // generates all units. + UnitFor::new_host(false) + } else if unit.target.proc_macro() { + UnitFor::new_host(true) + } else if unit.target.for_host() { + // Plugin should never have panic set. + UnitFor::new_compiler() } else { - break; - } + UnitFor::new_normal() + }; + deps_of(unit, &mut state, unit_for)?; } + Ok(()) } /// Compute the dependencies of a single unit. -fn deps_of<'a, 'cfg>( - unit: &Unit<'a>, - state: &mut State<'a, 'cfg>, - unit_for: UnitFor, -) -> CargoResult<()> { +fn deps_of(unit: &Unit, state: &mut State<'_, '_>, unit_for: UnitFor) -> CargoResult<()> { // Currently the `unit_dependencies` map does not include `unit_for`. This should // be safe for now. `TestDependency` only exists to clear the `panic` // flag, and you'll never ask for a `unit` with `panic` set as a @@ -212,7 +190,9 @@ // affect anything else in the hierarchy. if !state.unit_dependencies.contains_key(unit) { let unit_deps = compute_deps(unit, state, unit_for)?; - state.unit_dependencies.insert(*unit, unit_deps.clone()); + state + .unit_dependencies + .insert(unit.clone(), unit_deps.clone()); for unit_dep in unit_deps { deps_of(&unit_dep.unit, state, unit_dep.unit_for)?; } @@ -224,11 +204,11 @@ /// for that package. /// This returns a `Vec` of `(Unit, UnitFor)` pairs. The `UnitFor` /// is the profile type that should be used for dependencies of the unit. -fn compute_deps<'a, 'cfg>( - unit: &Unit<'a>, - state: &mut State<'a, 'cfg>, +fn compute_deps( + unit: &Unit, + state: &mut State<'_, '_>, unit_for: UnitFor, -) -> CargoResult>> { +) -> CargoResult> { if unit.mode.is_run_custom_build() { return compute_deps_custom_build(unit, unit_for, state); } else if unit.mode.is_doc() { @@ -236,7 +216,6 @@ return compute_deps_doc(unit, state); } - let bcx = state.bcx; let id = unit.pkg.package_id(); let filtered_deps = state.resolve().deps(id).filter(|&(_id, deps)| { assert!(!deps.is_empty()); @@ -260,7 +239,7 @@ // If this dependency is only available for certain platforms, // make sure we're only enabling it for that platform. 
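`deps_of` above recurses over the dependency graph but skips any unit whose dependencies were already computed, using the `unit_dependencies` map itself as the visited set. A rough sketch of that memoized walk over a toy graph (assumed names, not cargo's types):

```rust
use std::collections::HashMap;

// Toy dependency data: crate name -> direct dependencies.
fn direct_deps(name: &str) -> Vec<String> {
    match name {
        "app" => vec!["lib".to_string(), "serde".to_string()],
        "lib" => vec!["serde".to_string()],
        _ => vec![],
    }
}

// Fill `graph` with the transitive closure, computing each node only once.
fn deps_of(name: &str, graph: &mut HashMap<String, Vec<String>>) {
    if !graph.contains_key(name) {
        let deps = direct_deps(name);
        graph.insert(name.to_string(), deps.clone());
        for dep in deps {
            deps_of(&dep, graph);
        }
    }
}

fn main() {
    let mut graph = HashMap::new();
    deps_of("app", &mut graph);
    // "serde" is reached twice but only computed once.
    println!("{:?}", graph);
}
```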
- if !bcx.target_data.dep_platform_activated(dep, unit.kind) { + if !state.target_data.dep_platform_activated(dep, unit.kind) { return false; } @@ -285,10 +264,7 @@ let mut ret = Vec::new(); for (id, _) in filtered_deps { - let pkg = match state.get(id)? { - Some(pkg) => pkg, - None => continue, - }; + let pkg = state.get(id); let lib = match pkg.targets().iter().find(|t| t.is_lib()) { Some(t) => t, None => continue, @@ -297,9 +273,10 @@ let dep_unit_for = unit_for .with_for_host(lib.for_host()) // If it is a custom build script, then it *only* has build dependencies. - .with_build_dep(unit.target.is_custom_build()); + .with_host_features(unit.target.is_custom_build() || lib.proc_macro()); - if bcx.config.cli_unstable().dual_proc_macros && lib.proc_macro() && !unit.kind.is_host() { + if state.config.cli_unstable().dual_proc_macros && lib.proc_macro() && !unit.kind.is_host() + { let unit_dep = new_unit_dep(state, unit, pkg, lib, dep_unit_for, unit.kind, mode)?; ret.push(unit_dep); let unit_dep = @@ -347,26 +324,33 @@ .targets() .iter() .filter(|t| { - let no_required_features = Vec::new(); - - t.is_bin() && - // Skip binaries with required features that have not been selected. - t.required_features().unwrap_or(&no_required_features).iter().all(|f| { - unit.features.contains(&InternedString::new(f.as_str())) - }) + // Skip binaries with required features that have not been selected. + match t.required_features() { + Some(rf) if t.is_bin() => { + let features = resolve_all_features( + state.resolve(), + state.features(), + state.package_set, + id, + ); + rf.iter().all(|f| features.contains(f)) + } + None if t.is_bin() => true, + _ => false, + } }) .map(|t| { new_unit_dep( state, unit, - unit.pkg, + &unit.pkg, t, UnitFor::new_normal(), unit.kind.for_target(t), CompileMode::Build, ) }) - .collect::>>>()?, + .collect::>>()?, ); } @@ -377,20 +361,25 @@ /// /// The `unit` provided must represent an execution of a build script, and /// the returned set of units must all be run before `unit` is run. -fn compute_deps_custom_build<'a, 'cfg>( - unit: &Unit<'a>, +fn compute_deps_custom_build( + unit: &Unit, unit_for: UnitFor, - state: &mut State<'a, 'cfg>, -) -> CargoResult>> { + state: &mut State<'_, '_>, +) -> CargoResult> { if let Some(links) = unit.pkg.manifest().links() { - if state.bcx.script_override(links, unit.kind).is_some() { + if state + .target_data + .script_override(links, unit.kind) + .is_some() + { // Overridden build scripts don't have any dependencies. return Ok(Vec::new()); } } - // All dependencies of this unit should use profiles for custom - // builds. - let script_unit_for = UnitFor::new_build(unit_for.is_for_build_dep()); + // All dependencies of this unit should use profiles for custom builds. + // If this is a build script of a proc macro, make sure it uses host + // features. + let script_unit_for = UnitFor::new_host(unit_for.is_for_host_features()); // When not overridden, then the dependencies to run a build script are: // // 1. Compiling the build script itself. @@ -403,8 +392,8 @@ let unit_dep = new_unit_dep( state, unit, - unit.pkg, - unit.target, + &unit.pkg, + &unit.target, script_unit_for, // Build scripts always compiled for the host. CompileKind::Host, @@ -414,17 +403,14 @@ } /// Returns the dependencies necessary to document a package. 
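The binary filter above keeps a `bin` target only when every entry in its `required-features` list is present in the resolved feature set. Roughly, ignoring the bin/non-bin distinction and using hypothetical helper names, the check boils down to:

```rust
use std::collections::HashSet;

struct BinTarget {
    name: &'static str,
    required_features: Option<Vec<&'static str>>,
}

// Keep a binary only if all of its required features were activated.
fn is_selected(target: &BinTarget, activated: &HashSet<&str>) -> bool {
    match &target.required_features {
        Some(rf) => rf.iter().all(|f| activated.contains(f)),
        None => true,
    }
}

fn main() {
    let activated: HashSet<&str> = ["cli"].iter().copied().collect();
    let bins = [
        BinTarget { name: "main", required_features: None },
        BinTarget { name: "tool", required_features: Some(vec!["cli"]) },
        BinTarget { name: "gui", required_features: Some(vec!["gtk"]) },
    ];
    for bin in &bins {
        println!("{}: selected = {}", bin.name, is_selected(bin, &activated));
    }
}
```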
-fn compute_deps_doc<'a, 'cfg>( - unit: &Unit<'a>, - state: &mut State<'a, 'cfg>, -) -> CargoResult>> { - let bcx = state.bcx; +fn compute_deps_doc(unit: &Unit, state: &mut State<'_, '_>) -> CargoResult> { + let target_data = state.target_data; let deps = state .resolve() .deps(unit.pkg.package_id()) .filter(|&(_id, deps)| { deps.iter().any(|dep| match dep.kind() { - DepKind::Normal => bcx.target_data.dep_platform_activated(dep, unit.kind), + DepKind::Normal => target_data.dep_platform_activated(dep, unit.kind), _ => false, }) }); @@ -434,10 +420,7 @@ // the documentation of the library being built. let mut ret = Vec::new(); for (id, _deps) in deps { - let dep = match state.get(id)? { - Some(dep) => dep, - None => continue, - }; + let dep = state.get(id); let lib = match dep.targets().iter().find(|t| t.is_lib()) { Some(lib) => lib, None => continue, @@ -445,7 +428,9 @@ // Rustdoc only needs rmeta files for regular dependencies. // However, for plugins/proc macros, deps should be built like normal. let mode = check_or_build_mode(unit.mode, lib); - let dep_unit_for = UnitFor::new_normal().with_for_host(lib.for_host()); + let dep_unit_for = UnitFor::new_normal() + .with_for_host(lib.for_host()) + .with_host_features(lib.proc_macro()); let lib_unit_dep = new_unit_dep( state, unit, @@ -481,21 +466,21 @@ Ok(ret) } -fn maybe_lib<'a>( - unit: &Unit<'a>, - state: &mut State<'a, '_>, +fn maybe_lib( + unit: &Unit, + state: &mut State<'_, '_>, unit_for: UnitFor, -) -> CargoResult>> { +) -> CargoResult> { unit.pkg .targets() .iter() - .find(|t| t.linkable()) + .find(|t| t.is_linkable()) .map(|t| { let mode = check_or_build_mode(unit.mode, t); new_unit_dep( state, unit, - unit.pkg, + &unit.pkg, t, unit_for, unit.kind.for_target(t), @@ -512,11 +497,11 @@ /// script itself doesn't have any dependencies, so even in that case a unit /// of work is still returned. `None` is only returned if the package has no /// build script. -fn dep_build_script<'a>( - unit: &Unit<'a>, +fn dep_build_script( + unit: &Unit, unit_for: UnitFor, - state: &State<'a, '_>, -) -> CargoResult>> { + state: &State<'_, '_>, +) -> CargoResult> { unit.pkg .targets() .iter() @@ -524,40 +509,37 @@ .map(|t| { // The profile stored in the Unit is the profile for the thing // the custom build script is running for. - let profile = state - .bcx - .profiles - .get_profile_run_custom_build(&unit.profile); - // UnitFor::new_build is used because we want the `host` flag set + let profile = state.profiles.get_profile_run_custom_build(&unit.profile); + // UnitFor::new_host is used because we want the `host` flag set // for all of our build dependencies (so they all get // build-override profiles), including compiling the build.rs // script itself. // - // If `is_for_build_dep` here is `false`, that means we are a + // If `is_for_host_features` here is `false`, that means we are a // build.rs script for a normal dependency and we want to set the // CARGO_FEATURE_* environment variables to the features as a // normal dep. // - // If `is_for_build_dep` here is `true`, that means that this - // package is being used as a build dependency, and so we only - // want to set CARGO_FEATURE_* variables for the build-dependency + // If `is_for_host_features` here is `true`, that means that this + // package is being used as a build dependency or proc-macro, and + // so we only want to set CARGO_FEATURE_* variables for the host // side of the graph. // // Keep in mind that the RunCustomBuild unit and the Compile // build.rs unit use the same features. 
This is because some // people use `cfg!` and `#[cfg]` expressions to check for enabled // features instead of just checking `CARGO_FEATURE_*` at runtime. - // In the case with `-Zfeatures=build_dep`, and a shared + // In the case with `-Zfeatures=host_dep`, and a shared // dependency has different features enabled for normal vs. build, // then the build.rs script will get compiled twice. I believe it // is not feasible to only build it once because it would break a // large number of scripts (they would think they have the wrong // set of features enabled). - let script_unit_for = UnitFor::new_build(unit_for.is_for_build_dep()); + let script_unit_for = UnitFor::new_host(unit_for.is_for_host_features()); new_unit_dep_with_profile( state, unit, - unit.pkg, + &unit.pkg, t, script_unit_for, unit.kind, @@ -587,34 +569,36 @@ } /// Create a new Unit for a dependency from `parent` to `pkg` and `target`. -fn new_unit_dep<'a>( - state: &State<'a, '_>, - parent: &Unit<'a>, - pkg: &'a Package, - target: &'a Target, +fn new_unit_dep( + state: &State<'_, '_>, + parent: &Unit, + pkg: &Package, + target: &Target, unit_for: UnitFor, kind: CompileKind, mode: CompileMode, -) -> CargoResult> { - let profile = state.bcx.profiles.get_profile( +) -> CargoResult { + let is_local = pkg.package_id().source_id().is_path() && !state.is_std; + let profile = state.profiles.get_profile( pkg.package_id(), - state.bcx.ws.is_member(pkg), + state.ws.is_member(pkg), + is_local, unit_for, mode, ); new_unit_dep_with_profile(state, parent, pkg, target, unit_for, kind, mode, profile) } -fn new_unit_dep_with_profile<'a>( - state: &State<'a, '_>, - parent: &Unit<'a>, - pkg: &'a Package, - target: &'a Target, +fn new_unit_dep_with_profile( + state: &State<'_, '_>, + parent: &Unit, + pkg: &Package, + target: &Target, unit_for: UnitFor, kind: CompileKind, mode: CompileMode, profile: Profile, -) -> CargoResult> { +) -> CargoResult { // TODO: consider making extern_crate_name return InternedString? let extern_crate_name = InternedString::new(&state.resolve().extern_crate_name( parent.pkg.package_id(), @@ -627,8 +611,7 @@ let features_for = unit_for.map_to_features_for(); let features = state.activated_features(pkg.package_id(), features_for); let unit = state - .bcx - .units + .interner .intern(pkg, target, profile, kind, mode, features, state.is_std); Ok(UnitDep { unit, @@ -649,7 +632,7 @@ /// /// Here we take the entire `deps` map and add more dependencies from execution /// of one build script to execution of another build script. -fn connect_run_custom_build_deps(unit_dependencies: &mut UnitGraph<'_>) { +fn connect_run_custom_build_deps(unit_dependencies: &mut UnitGraph) { let mut new_deps = Vec::new(); { @@ -663,7 +646,7 @@ for dep in deps { if dep.unit.mode == CompileMode::RunCustomBuild { reverse_deps_map - .entry(dep.unit) + .entry(dep.unit.clone()) .or_insert_with(HashSet::new) .insert(unit); } @@ -696,7 +679,7 @@ // Only deps with `links`. .filter(|other| { other.unit.pkg != unit.pkg - && other.unit.target.linkable() + && other.unit.target.is_linkable() && other.unit.pkg.manifest().links().is_some() }) // Get the RunCustomBuild for other lib. 
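`connect_run_custom_build_deps` above first inverts the unit graph so it can ask "who depends on this build-script run?". The inversion itself is the usual forward-to-reverse map flip, shown here with string keys instead of `Unit`s:

```rust
use std::collections::{HashMap, HashSet};

// Invert a forward dependency map (node -> deps) into a reverse map
// (dep -> set of nodes that depend on it), mirroring the
// `reverse_deps_map.entry(dep).or_insert_with(HashSet::new)` pattern above.
fn reverse(forward: &HashMap<&str, Vec<&str>>) -> HashMap<String, HashSet<String>> {
    let mut reverse: HashMap<String, HashSet<String>> = HashMap::new();
    for (node, deps) in forward {
        for dep in deps {
            reverse
                .entry(dep.to_string())
                .or_insert_with(HashSet::new)
                .insert(node.to_string());
        }
    }
    reverse
}

fn main() {
    let mut forward = HashMap::new();
    forward.insert("foo/build.rs", vec!["libz-sys/build.rs"]);
    forward.insert("bar/build.rs", vec!["libz-sys/build.rs"]);
    println!("{:?}", reverse(&forward));
}
```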
@@ -710,7 +693,7 @@ if !to_add.is_empty() { // (RunCustomBuild, set(other RunCustomBuild)) - new_deps.push((*unit, to_add)); + new_deps.push((unit.clone(), to_add)); } } } @@ -730,57 +713,26 @@ } } + fn features(&self) -> &'a ResolvedFeatures { + if self.is_std { + self.std_features.unwrap() + } else { + self.usr_features + } + } + fn activated_features( &self, pkg_id: PackageId, features_for: FeaturesFor, ) -> Vec { - let features = if self.is_std { - self.std_features.unwrap() - } else { - self.usr_features - }; + let features = self.features(); features.activated_features(pkg_id, features_for) } - fn get(&mut self, id: PackageId) -> CargoResult> { - if let Some(pkg) = self.package_cache.get(&id) { - return Ok(Some(pkg)); - } - if !self.waiting_on_download.insert(id) { - return Ok(None); - } - if let Some(pkg) = self.downloads.start(id)? { - self.package_cache.insert(id, pkg); - self.waiting_on_download.remove(&id); - return Ok(Some(pkg)); - } - Ok(None) - } - - /// Completes at least one downloading, maybe waiting for more to complete. - /// - /// This function will block the current thread waiting for at least one - /// crate to finish downloading. The function may continue to download more - /// crates if it looks like there's a long enough queue of crates to keep - /// downloading. When only a handful of packages remain this function - /// returns, and it's hoped that by returning we'll be able to push more - /// packages to download into the queue. - fn finish_some_downloads(&mut self) -> CargoResult<()> { - assert!(self.downloads.remaining() > 0); - loop { - let pkg = self.downloads.wait()?; - self.waiting_on_download.remove(&pkg.package_id()); - self.package_cache.insert(pkg.package_id(), pkg); - - // Arbitrarily choose that 5 or more packages concurrently download - // is a good enough number to "fill the network pipe". If we have - // less than this let's recompute the whole unit dependency graph - // again and try to find some more packages to download. - if self.downloads.remaining() < 5 { - break; - } - } - Ok(()) + fn get(&self, id: PackageId) -> &'a Package { + self.package_set + .get_one(id) + .unwrap_or_else(|_| panic!("expected {} to be downloaded", id)) } } diff -Nru cargo-0.44.1/src/cargo/core/compiler/unit_graph.rs cargo-0.47.0/src/cargo/core/compiler/unit_graph.rs --- cargo-0.44.1/src/cargo/core/compiler/unit_graph.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/unit_graph.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,119 @@ +use crate::core::compiler::Unit; +use crate::core::compiler::{CompileKind, CompileMode}; +use crate::core::profiles::{Profile, UnitFor}; +use crate::core::{nightly_features_allowed, PackageId, Target}; +use crate::util::interning::InternedString; +use crate::util::CargoResult; +use std::collections::HashMap; +use std::io::Write; + +/// The dependency graph of Units. +pub type UnitGraph = HashMap>; + +/// A unit dependency. +#[derive(Debug, Clone, Hash, Eq, PartialEq, PartialOrd, Ord)] +pub struct UnitDep { + /// The dependency unit. + pub unit: Unit, + /// The purpose of this dependency (a dependency for a test, or a build + /// script, etc.). + pub unit_for: UnitFor, + /// The name the parent uses to refer to this dependency. + pub extern_crate_name: InternedString, + /// Whether or not this is a public dependency. + pub public: bool, + /// If `true`, the dependency should not be added to Rust's prelude. 
+ pub noprelude: bool, +} + +const VERSION: u32 = 1; + +#[derive(serde::Serialize)] +struct SerializedUnitGraph<'a> { + version: u32, + units: Vec>, + roots: Vec, +} + +#[derive(serde::Serialize)] +struct SerializedUnit<'a> { + pkg_id: PackageId, + target: &'a Target, + profile: &'a Profile, + platform: CompileKind, + mode: CompileMode, + features: &'a Vec, + #[serde(skip_serializing_if = "std::ops::Not::not")] // hide for unstable build-std + is_std: bool, + dependencies: Vec, +} + +#[derive(serde::Serialize)] +struct SerializedUnitDep { + index: usize, + extern_crate_name: InternedString, + // This is only set on nightly since it is unstable. + #[serde(skip_serializing_if = "Option::is_none")] + public: Option, + // This is only set on nightly since it is unstable. + #[serde(skip_serializing_if = "Option::is_none")] + noprelude: Option, + // Intentionally not including `unit_for` because it is a low-level + // internal detail that is mostly used for building the graph. +} + +pub fn emit_serialized_unit_graph(root_units: &[Unit], unit_graph: &UnitGraph) -> CargoResult<()> { + let is_nightly = nightly_features_allowed(); + let mut units: Vec<(&Unit, &Vec)> = unit_graph.iter().collect(); + units.sort_unstable(); + // Create a map for quick lookup for dependencies. + let indices: HashMap<&Unit, usize> = units + .iter() + .enumerate() + .map(|(i, val)| (val.0, i)) + .collect(); + let roots = root_units.iter().map(|root| indices[root]).collect(); + let ser_units = units + .iter() + .map(|(unit, unit_deps)| { + let dependencies = unit_deps + .iter() + .map(|unit_dep| { + // https://github.com/rust-lang/rust/issues/64260 when stabilized. + let (public, noprelude) = if is_nightly { + (Some(unit_dep.public), Some(unit_dep.noprelude)) + } else { + (None, None) + }; + SerializedUnitDep { + index: indices[&unit_dep.unit], + extern_crate_name: unit_dep.extern_crate_name, + public, + noprelude, + } + }) + .collect(); + SerializedUnit { + pkg_id: unit.pkg.package_id(), + target: &unit.target, + profile: &unit.profile, + platform: unit.kind, + mode: unit.mode, + features: &unit.features, + is_std: unit.is_std, + dependencies, + } + }) + .collect(); + let s = SerializedUnitGraph { + version: VERSION, + units: ser_units, + roots, + }; + + let stdout = std::io::stdout(); + let mut lock = stdout.lock(); + serde_json::to_writer(&mut lock, &s)?; + drop(writeln!(lock)); + Ok(()) +} diff -Nru cargo-0.44.1/src/cargo/core/compiler/unit.rs cargo-0.47.0/src/cargo/core/compiler/unit.rs --- cargo-0.44.1/src/cargo/core/compiler/unit.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/compiler/unit.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,11 +1,15 @@ -use crate::core::compiler::{CompileKind, CompileMode}; -use crate::core::{profiles::Profile, InternedString, Package, Target}; +use crate::core::compiler::{CompileKind, CompileMode, CrateType}; +use crate::core::manifest::{Target, TargetKind}; +use crate::core::{profiles::Profile, Package}; use crate::util::hex::short_hash; +use crate::util::interning::InternedString; +use crate::util::Config; use std::cell::RefCell; use std::collections::HashSet; use std::fmt; use std::hash::{Hash, Hasher}; use std::ops::Deref; +use std::rc::Rc; /// All information needed to define a unit. /// @@ -21,21 +25,21 @@ /// example, it needs to know the target architecture (OS, chip arch etc.) and it needs to know /// whether you want a debug or release build. There is enough information in this struct to figure /// all that out. 
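The new `unit_graph.rs` above serializes every unit into a single array and encodes edges as indices into that array, so the JSON stays compact and acyclic-friendly. A compact sketch of the same indexing idea with a toy `Node` type (assumes the `serde` crate with the `derive` feature and `serde_json` as dependencies):

```rust
use std::collections::HashMap;

#[derive(serde::Serialize)]
struct SerializedNode<'a> {
    name: &'a str,
    // Edges refer to positions in the top-level `nodes` array.
    dependencies: Vec<usize>,
}

#[derive(serde::Serialize)]
struct SerializedGraph<'a> {
    version: u32,
    nodes: Vec<SerializedNode<'a>>,
    roots: Vec<usize>,
}

fn main() -> Result<(), serde_json::Error> {
    // Toy graph: name -> dependency names.
    let graph: Vec<(&str, Vec<&str>)> = vec![("app", vec!["lib"]), ("lib", vec![])];

    // Map each name to its index so edges can be emitted as numbers.
    let indices: HashMap<&str, usize> = graph
        .iter()
        .enumerate()
        .map(|(i, (name, _))| (*name, i))
        .collect();

    let nodes: Vec<SerializedNode> = graph
        .iter()
        .map(|(name, deps)| SerializedNode {
            name: *name,
            dependencies: deps.iter().map(|d| indices[d]).collect(),
        })
        .collect();

    let out = SerializedGraph { version: 1, nodes, roots: vec![indices["app"]] };
    println!("{}", serde_json::to_string_pretty(&out)?);
    Ok(())
}
```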
-#[derive(Clone, Copy, PartialOrd, Ord)] -pub struct Unit<'a> { - inner: &'a UnitInner<'a>, +#[derive(Clone, PartialOrd, Ord)] +pub struct Unit { + inner: Rc, } /// Internal fields of `Unit` which `Unit` will dereference to. #[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct UnitInner<'a> { +pub struct UnitInner { /// Information about available targets, which files to include/exclude, etc. Basically stuff in /// `Cargo.toml`. - pub pkg: &'a Package, + pub pkg: Package, /// Information about the specific target to build, out of the possible targets in `pkg`. Not /// to be confused with *target-triple* (or *target architecture* ...), the target arch for a /// build. - pub target: &'a Target, + pub target: Target, /// The profile contains information about *how* the build should be run, including debug /// level, etc. pub profile: Profile, @@ -55,7 +59,7 @@ pub is_std: bool, } -impl UnitInner<'_> { +impl UnitInner { /// Returns whether compilation of this unit requires all upstream artifacts /// to be available. /// @@ -65,39 +69,52 @@ pub fn requires_upstream_objects(&self) -> bool { self.mode.is_any_test() || self.target.kind().requires_upstream_objects() } + + /// Returns whether or not this is a "local" package. + /// + /// A "local" package is one that the user can likely edit, or otherwise + /// wants warnings, etc. + pub fn is_local(&self) -> bool { + self.pkg.package_id().source_id().is_path() && !self.is_std + } + + /// Returns whether or not warnings should be displayed for this unit. + pub fn show_warnings(&self, config: &Config) -> bool { + self.is_local() || config.extra_verbose() + } } -impl<'a> Unit<'a> { +impl Unit { pub fn buildkey(&self) -> String { format!("{}-{}", self.pkg.name(), short_hash(self)) } } // Just hash the pointer for fast hashing -impl<'a> Hash for Unit<'a> { +impl Hash for Unit { fn hash(&self, hasher: &mut H) { - (self.inner as *const UnitInner<'a>).hash(hasher) + std::ptr::hash(&*self.inner, hasher) } } // Just equate the pointer since these are interned -impl<'a> PartialEq for Unit<'a> { - fn eq(&self, other: &Unit<'a>) -> bool { - self.inner as *const UnitInner<'a> == other.inner as *const UnitInner<'a> +impl PartialEq for Unit { + fn eq(&self, other: &Unit) -> bool { + std::ptr::eq(&*self.inner, &*other.inner) } } -impl<'a> Eq for Unit<'a> {} +impl Eq for Unit {} -impl<'a> Deref for Unit<'a> { - type Target = UnitInner<'a>; +impl Deref for Unit { + type Target = UnitInner; - fn deref(&self) -> &UnitInner<'a> { - self.inner + fn deref(&self) -> &UnitInner { + &*self.inner } } -impl<'a> fmt::Debug for Unit<'a> { +impl fmt::Debug for Unit { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Unit") .field("pkg", &self.pkg) @@ -117,17 +134,17 @@ /// efficient hash/equality implementation for `Unit`. All units are /// manufactured through an interner which guarantees that each equivalent value /// is only produced once. -pub struct UnitInterner<'a> { - state: RefCell>, +pub struct UnitInterner { + state: RefCell, } -struct InternerState<'a> { - cache: HashSet>>, +struct InternerState { + cache: HashSet>, } -impl<'a> UnitInterner<'a> { +impl UnitInterner { /// Creates a new blank interner - pub fn new() -> UnitInterner<'a> { + pub fn new() -> UnitInterner { UnitInterner { state: RefCell::new(InternerState { cache: HashSet::new(), @@ -139,17 +156,38 @@ /// will all be equivalent to the provided arguments, although they may not /// be the exact same instance. 
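`Unit` above stays cheap to hash and compare even though it now owns an `Rc<UnitInner>`: because every inner value comes out of the interner exactly once, hashing and equality can use the allocation's address. A small sketch of that pattern in isolation:

```rust
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::rc::Rc;

#[derive(Clone)]
struct Handle {
    inner: Rc<String>,
}

// Hash and compare by pointer: only valid when equal values are always
// interned into the same allocation.
impl Hash for Handle {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        std::ptr::hash(&*self.inner, hasher);
    }
}

impl PartialEq for Handle {
    fn eq(&self, other: &Handle) -> bool {
        std::ptr::eq(&*self.inner, &*other.inner)
    }
}

impl Eq for Handle {}

fn main() {
    let shared = Rc::new("interned".to_string());
    let a = Handle { inner: shared.clone() };
    let b = Handle { inner: shared };
    let c = Handle { inner: Rc::new("interned".to_string()) };

    let mut set = HashSet::new();
    set.insert(a);
    assert!(set.contains(&b)); // same allocation
    assert!(!set.contains(&c)); // equal contents, different allocation
    println!("pointer-based identity works as expected");
}
```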
pub fn intern( - &'a self, - pkg: &'a Package, - target: &'a Target, + &self, + pkg: &Package, + target: &Target, profile: Profile, kind: CompileKind, mode: CompileMode, features: Vec, is_std: bool, - ) -> Unit<'a> { + ) -> Unit { + let target = match (is_std, target.kind()) { + // This is a horrible hack to support build-std. `libstd` declares + // itself with both rlib and dylib. We don't want the dylib for a + // few reasons: + // + // - dylibs don't have a hash in the filename. If you do something + // (like switch rustc versions), it will stomp on the dylib + // file, invalidating the entire cache (because std is a dep of + // everything). + // - We don't want to publicize the presence of dylib for the + // standard library. + // + // At some point in the future, it would be nice to have a + // first-class way of overriding or specifying crate-types. + (true, TargetKind::Lib(crate_types)) if crate_types.contains(&CrateType::Dylib) => { + let mut new_target = Target::clone(target); + new_target.set_kind(TargetKind::Lib(vec![CrateType::Rlib])); + new_target + } + _ => target.clone(), + }; let inner = self.intern_inner(&UnitInner { - pkg, + pkg: pkg.clone(), target, profile, kind, @@ -160,34 +198,13 @@ Unit { inner } } - // Ok so interning here is a little unsafe, hence the usage of `unsafe` - // internally. The primary issue here is that we've got an internal cache of - // `UnitInner` instances added so far, but we may need to mutate it to add - // it, and the mutation for an interner happens behind a shared borrow. - // - // Our goal though is to escape the lifetime `borrow_mut` to the same - // lifetime as the borrowed passed into this function. That's where `unsafe` - // comes into play. What we're subverting here is resizing internally in the - // `HashSet` as well as overwriting previous keys in the `HashSet`. - // - // As a result we store `Box` internally to have an extra layer - // of indirection. That way `*const UnitInner` is a stable address that - // doesn't change with `HashSet` resizing. Furthermore we're careful to - // never overwrite an entry once inserted. - // - // Ideally we'd use an off-the-shelf interner from crates.io which avoids a - // small amount of unsafety here, but at the time this was written one - // wasn't obviously available. - fn intern_inner(&'a self, item: &UnitInner<'a>) -> &'a UnitInner<'a> { + fn intern_inner(&self, item: &UnitInner) -> Rc { let mut me = self.state.borrow_mut(); if let Some(item) = me.cache.get(item) { - // note that `item` has type `&Box`. Use `&**` to - // convert that to `&UnitInner<'a>`, then do some trickery to extend - // the lifetime to the `'a` on the function here. 
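The unsafe `Box`-and-raw-pointer interner described in the deleted comment above is replaced, just below, by a safe one that stores `Rc`s in a `HashSet` and hands out clones. Reduced to its essentials, and with a toy value type rather than cargo's `UnitInner`, the pattern looks like this:

```rust
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;

// Toy stand-in for `UnitInner`.
#[derive(Clone, Hash, PartialEq, Eq)]
struct Value {
    name: String,
    features: Vec<String>,
}

// Equal values are stored once; callers get cheap `Rc` clones back.
struct Interner {
    cache: RefCell<HashSet<Rc<Value>>>,
}

impl Interner {
    fn new() -> Interner {
        Interner { cache: RefCell::new(HashSet::new()) }
    }

    fn intern(&self, item: &Value) -> Rc<Value> {
        let mut cache = self.cache.borrow_mut();
        if let Some(existing) = cache.get(item) {
            return existing.clone();
        }
        let item = Rc::new(item.clone());
        cache.insert(item.clone());
        item
    }
}

fn main() {
    let interner = Interner::new();
    let value = Value { name: "cargo".to_string(), features: vec![] };
    let a = interner.intern(&value);
    let b = interner.intern(&value);
    assert!(Rc::ptr_eq(&a, &b)); // one allocation, two handles
    println!("interned once, shared twice");
}
```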
- return unsafe { &*(&**item as *const UnitInner<'a>) }; + return item.clone(); } - me.cache.insert(Box::new(item.clone())); - let item = me.cache.get(item).unwrap(); - unsafe { &*(&**item as *const UnitInner<'a>) } + let item = Rc::new(item.clone()); + me.cache.insert(item.clone()); + item } } diff -Nru cargo-0.44.1/src/cargo/core/dependency.rs cargo-0.47.0/src/cargo/core/dependency.rs --- cargo-0.44.1/src/cargo/core/dependency.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/dependency.rs 2020-07-17 20:39:39.000000000 +0000 @@ -6,9 +6,9 @@ use serde::Serialize; use std::rc::Rc; -use crate::core::interning::InternedString; use crate::core::{PackageId, SourceId, Summary}; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::Config; /// Information about a dependency requested by a Cargo manifest. @@ -393,8 +393,7 @@ /// Returns `true` if this is a "locked" dependency, basically whether it has /// an exact version req. pub fn is_locked(&self) -> bool { - // Kind of a hack to figure this out, but it works! - self.inner.req.to_string().starts_with('=') + self.inner.req.is_exact() } /// Returns `false` if the dependency is only used to build the local package. diff -Nru cargo-0.44.1/src/cargo/core/features.rs cargo-0.47.0/src/cargo/core/features.rs --- cargo-0.44.1/src/cargo/core/features.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -82,6 +82,11 @@ match s { "2015" => Ok(Edition::Edition2015), "2018" => Ok(Edition::Edition2018), + s if s.parse().map_or(false, |y: u16| y > 2020 && y < 2050) => bail!( + "this version of Cargo is older than the `{}` edition, \ + and only supports `2015` and `2018` editions.", + s + ), s => bail!( "supported edition values are `2015` or `2018`, but `{}` \ is unknown", @@ -206,6 +211,12 @@ // Allow to specify profiles other than 'dev', 'release', 'test', etc. [unstable] named_profiles: bool, + + // Opt-in new-resolver behavior. + [unstable] resolver: bool, + + // Allow to specify whether binaries should be stripped. + [unstable] strip: bool, } } @@ -343,6 +354,9 @@ pub jobserver_per_rustc: bool, pub features: Option>, pub crate_versions: bool, + pub separate_nightlies: bool, + pub multitarget: bool, + pub rustdoc_map: bool, } impl CliUnstable { @@ -420,6 +434,9 @@ "jobserver-per-rustc" => self.jobserver_per_rustc = parse_empty(k, v)?, "features" => self.features = Some(parse_features(v)), "crate-versions" => self.crate_versions = parse_empty(k, v)?, + "separate-nightlies" => self.separate_nightlies = parse_empty(k, v)?, + "multitarget" => self.multitarget = parse_empty(k, v)?, + "rustdoc-map" => self.rustdoc_map = parse_empty(k, v)?, _ => bail!("unknown `-Z` flag specified: {}", k), } diff -Nru cargo-0.44.1/src/cargo/core/interning.rs cargo-0.47.0/src/cargo/core/interning.rs --- cargo-0.44.1/src/cargo/core/interning.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/interning.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,182 +0,0 @@ -use serde::{Serialize, Serializer}; -use std::borrow::Borrow; -use std::cmp::Ordering; -use std::collections::HashSet; -use std::ffi::OsStr; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ops::Deref; -use std::path::Path; -use std::ptr; -use std::str; -use std::sync::Mutex; - -pub fn leak(s: String) -> &'static str { - Box::leak(s.into_boxed_str()) -} - -lazy_static::lazy_static! 
{ - static ref STRING_CACHE: Mutex> = Mutex::new(HashSet::new()); -} - -#[derive(Clone, Copy)] -pub struct InternedString { - inner: &'static str, -} - -impl<'a> From<&'a str> for InternedString { - fn from(item: &'a str) -> Self { - InternedString::new(item) - } -} - -impl<'a> From<&'a String> for InternedString { - fn from(item: &'a String) -> Self { - InternedString::new(item) - } -} - -impl From for InternedString { - fn from(item: String) -> Self { - InternedString::new(&item) - } -} - -impl PartialEq for InternedString { - fn eq(&self, other: &InternedString) -> bool { - ptr::eq(self.as_str(), other.as_str()) - } -} - -impl PartialEq for InternedString { - fn eq(&self, other: &str) -> bool { - *self == other - } -} - -impl<'a> PartialEq<&'a str> for InternedString { - fn eq(&self, other: &&str) -> bool { - **self == **other - } -} - -impl Eq for InternedString {} - -impl InternedString { - pub fn new(str: &str) -> InternedString { - let mut cache = STRING_CACHE.lock().unwrap(); - let s = cache.get(str).cloned().unwrap_or_else(|| { - let s = leak(str.to_string()); - cache.insert(s); - s - }); - - InternedString { inner: s } - } - - pub fn as_str(&self) -> &'static str { - self.inner - } -} - -impl Deref for InternedString { - type Target = str; - - fn deref(&self) -> &'static str { - self.as_str() - } -} - -impl AsRef for InternedString { - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl AsRef for InternedString { - fn as_ref(&self) -> &OsStr { - self.as_str().as_ref() - } -} - -impl AsRef for InternedString { - fn as_ref(&self) -> &Path { - self.as_str().as_ref() - } -} - -impl Hash for InternedString { - // N.B., we can't implement this as `identity(self).hash(state)`, - // because we use this for on-disk fingerprints and so need - // stability across Cargo invocations. - fn hash(&self, state: &mut H) { - self.as_str().hash(state); - } -} - -impl Borrow for InternedString { - // If we implement Hash as `identity(self).hash(state)`, - // then this will need to be removed. 
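For reference, the `InternedString` being deleted here (it moves to `util::interning`, as the import changes elsewhere in this patch show) interns by leaking each new string once into a process-global set and handing out `&'static str`. A condensed sketch of that approach, using `std::sync::OnceLock` purely to keep the example dependency-free where the original uses the `lazy_static` macro:

```rust
use std::collections::HashSet;
use std::sync::{Mutex, OnceLock};

// Process-global cache of leaked strings.
fn cache() -> &'static Mutex<HashSet<&'static str>> {
    static CACHE: OnceLock<Mutex<HashSet<&'static str>>> = OnceLock::new();
    CACHE.get_or_init(|| Mutex::new(HashSet::new()))
}

#[derive(Clone, Copy, Debug)]
struct InternedString {
    inner: &'static str,
}

impl InternedString {
    fn new(s: &str) -> InternedString {
        let mut cache = cache().lock().unwrap();
        if let Some(&interned) = cache.get(s) {
            return InternedString { inner: interned };
        }
        // Leak exactly once per distinct string; every later lookup
        // returns the same &'static str.
        let leaked: &'static str = Box::leak(s.to_string().into_boxed_str());
        cache.insert(leaked);
        InternedString { inner: leaked }
    }

    fn as_str(&self) -> &'static str {
        self.inner
    }
}

fn main() {
    let a = InternedString::new("serde");
    let b = InternedString::new("serde");
    // The same leaked allocation backs both values.
    assert!(std::ptr::eq(a.as_str(), b.as_str()));
    println!("{} is interned", a.as_str());
}
```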
- fn borrow(&self) -> &str { - self.as_str() - } -} - -impl fmt::Debug for InternedString { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self.as_str(), f) - } -} - -impl fmt::Display for InternedString { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.as_str(), f) - } -} - -impl Ord for InternedString { - fn cmp(&self, other: &InternedString) -> Ordering { - self.as_str().cmp(other.as_str()) - } -} - -impl PartialOrd for InternedString { - fn partial_cmp(&self, other: &InternedString) -> Option { - Some(self.cmp(other)) - } -} - -impl Serialize for InternedString { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(self.inner) - } -} - -struct InternedStringVisitor; - -impl<'de> serde::Deserialize<'de> for InternedString { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_str(InternedStringVisitor) - } -} - -impl<'de> serde::de::Visitor<'de> for InternedStringVisitor { - type Value = InternedString; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("an String like thing") - } - - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - Ok(InternedString::new(v)) - } -} diff -Nru cargo-0.44.1/src/cargo/core/manifest.rs cargo-0.47.0/src/cargo/core/manifest.rs --- cargo-0.44.1/src/cargo/core/manifest.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/manifest.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,16 +3,19 @@ use std::hash::{Hash, Hasher}; use std::path::{Path, PathBuf}; use std::rc::Rc; +use std::sync::Arc; use semver::Version; use serde::ser; use serde::Serialize; use url::Url; -use crate::core::interning::InternedString; +use crate::core::compiler::CrateType; +use crate::core::resolver::ResolveBehavior; use crate::core::{Dependency, PackageId, PackageIdSpec, SourceId, Summary}; use crate::core::{Edition, Feature, Features, WorkspaceConfig}; use crate::util::errors::*; +use crate::util::interning::InternedString; use crate::util::toml::{TomlManifest, TomlProfiles}; use crate::util::{short_hash, Config, Filesystem}; @@ -44,6 +47,7 @@ im_a_teapot: Option, default_run: Option, metabuild: Option>, + resolve_behavior: Option, } /// When parsing `Cargo.toml`, some warnings should silenced @@ -66,6 +70,7 @@ profiles: Option, warnings: Warnings, features: Features, + resolve_behavior: Option, } /// General metadata about a package which is just blindly uploaded to the @@ -92,73 +97,13 @@ pub links: Option, } -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum LibKind { - Lib, - Rlib, - Dylib, - ProcMacro, - Other(String), -} - -impl LibKind { - /// Returns the argument suitable for `--crate-type` to pass to rustc. - pub fn crate_type(&self) -> &str { - match *self { - LibKind::Lib => "lib", - LibKind::Rlib => "rlib", - LibKind::Dylib => "dylib", - LibKind::ProcMacro => "proc-macro", - LibKind::Other(ref s) => s, - } - } - - pub fn linkable(&self) -> bool { - match *self { - LibKind::Lib | LibKind::Rlib | LibKind::Dylib | LibKind::ProcMacro => true, - LibKind::Other(..) => false, - } - } - - pub fn requires_upstream_objects(&self) -> bool { - match *self { - // "lib" == "rlib" and is a compilation that doesn't actually - // require upstream object files to exist, only upstream metadata - // files. 
As a result, it doesn't require upstream artifacts - LibKind::Lib | LibKind::Rlib => false, - - // Everything else, however, is some form of "linkable output" or - // something that requires upstream object files. - _ => true, - } - } -} - -impl fmt::Debug for LibKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.crate_type().fmt(f) - } -} - -impl<'a> From<&'a String> for LibKind { - fn from(string: &'a String) -> Self { - match string.as_ref() { - "lib" => LibKind::Lib, - "rlib" => LibKind::Rlib, - "dylib" => LibKind::Dylib, - "proc-macro" => LibKind::ProcMacro, - s => LibKind::Other(s.to_string()), - } - } -} - #[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] pub enum TargetKind { - Lib(Vec), + Lib(Vec), Bin, Test, Bench, - ExampleLib(Vec), + ExampleLib(Vec), ExampleBin, CustomBuild, } @@ -169,8 +114,8 @@ S: ser::Serializer, { use self::TargetKind::*; - match *self { - Lib(ref kinds) => s.collect_seq(kinds.iter().map(LibKind::crate_type)), + match self { + Lib(kinds) => s.collect_seq(kinds.iter().map(|t| t.to_string())), Bin => ["bin"].serialize(s), ExampleBin | ExampleLib(_) => ["example"].serialize(s), Test => ["test"].serialize(s), @@ -219,12 +164,29 @@ _ => true, } } + + /// Returns the arguments suitable for `--crate-type` to pass to rustc. + pub fn rustc_crate_types(&self) -> Vec { + match self { + TargetKind::Lib(kinds) | TargetKind::ExampleLib(kinds) => kinds.clone(), + TargetKind::CustomBuild + | TargetKind::Bench + | TargetKind::Test + | TargetKind::ExampleBin + | TargetKind::Bin => vec![CrateType::Bin], + } + } } /// Information about a binary, a library, an example, etc. that is part of the /// package. #[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] pub struct Target { + inner: Arc, +} + +#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +struct TargetInner { kind: TargetKind, name: String, // Note that the `src_path` here is excluded from the `Hash` implementation @@ -294,7 +256,7 @@ kind: &'a TargetKind, /// Corresponds to `--crate-type` compiler attribute. /// See https://doc.rust-lang.org/reference/linkage.html - crate_types: Vec<&'a str>, + crate_types: Vec, name: &'a str, src_path: Option<&'a PathBuf>, edition: &'a str, @@ -305,30 +267,35 @@ impl ser::Serialize for Target { fn serialize(&self, s: S) -> Result { - let src_path = match &self.src_path { + let src_path = match self.src_path() { TargetSourcePath::Path(p) => Some(p), // Unfortunately getting the correct path would require access to // target_dir, which is not available here. TargetSourcePath::Metabuild => None, }; SerializedTarget { - kind: &self.kind, + kind: self.kind(), crate_types: self.rustc_crate_types(), - name: &self.name, + name: self.name(), src_path, - edition: &self.edition.to_string(), + edition: &self.edition().to_string(), required_features: self - .required_features - .as_ref() + .required_features() .map(|rf| rf.iter().map(|s| &**s).collect()), - doctest: self.doctest && self.doctestable(), + doctest: self.doctested() && self.doctestable(), } .serialize(s) } } +impl fmt::Debug for Target { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + compact_debug! 
{ - impl fmt::Debug for Target { + impl fmt::Debug for TargetInner { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let (default, default_name) = { match &self.kind { @@ -337,9 +304,9 @@ Target::lib_target( &self.name, kinds.clone(), - self.src_path().path().unwrap().to_path_buf(), + self.src_path.path().unwrap().to_path_buf(), self.edition, - ), + ).inner, format!("lib_target({:?}, {:?}, {:?}, {:?})", self.name, kinds, self.src_path, self.edition), ) @@ -352,21 +319,21 @@ &self.name, path.to_path_buf(), self.edition, - ), + ).inner, format!("custom_build_target({:?}, {:?}, {:?})", self.name, path, self.edition), ) } TargetSourcePath::Metabuild => { ( - Target::metabuild_target(&self.name), + Target::metabuild_target(&self.name).inner, format!("metabuild_target({:?})", self.name), ) } } } _ => ( - Target::new(self.src_path.clone(), self.edition), + Target::new(self.src_path.clone(), self.edition).inner, format!("with_path({:?}, {:?})", self.src_path, self.edition), ), } @@ -410,6 +377,7 @@ default_run: Option, original: Rc, metabuild: Option>, + resolve_behavior: Option, ) -> Manifest { Manifest { summary, @@ -432,6 +400,7 @@ default_run, publish_lockfile, metabuild, + resolve_behavior, } } @@ -462,9 +431,6 @@ pub fn targets(&self) -> &[Target] { &self.targets } - pub fn targets_mut(&mut self) -> &mut [Target] { - &mut self.targets - } pub fn version(&self) -> &Version { self.package_id().version() } @@ -490,7 +456,7 @@ &self.patch } pub fn links(&self) -> Option<&str> { - self.links.as_ref().map(|s| &s[..]) + self.links.as_deref() } pub fn workspace_config(&self) -> &WorkspaceConfig { @@ -501,6 +467,13 @@ &self.features } + /// The style of resolver behavior to use, declared with the `resolver` field. + /// + /// Returns `None` if it is not specified. + pub fn resolve_behavior(&self) -> Option { + self.resolve_behavior + } + pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Manifest { Manifest { summary: self.summary.map_source(to_replace, replace_with), @@ -527,7 +500,7 @@ pub fn print_teapot(&self, config: &Config) { if let Some(teapot) = self.im_a_teapot { if config.cli_unstable().print_im_a_teapot { - println!("im-a-teapot = {}", teapot); + crate::drop_println!(config, "im-a-teapot = {}", teapot); } } } @@ -541,7 +514,7 @@ } pub fn default_run(&self) -> Option<&str> { - self.default_run.as_ref().map(|s| &s[..]) + self.default_run.as_deref() } pub fn metabuild(&self) -> Option<&Vec> { @@ -564,6 +537,7 @@ workspace: WorkspaceConfig, profiles: Option, features: Features, + resolve_behavior: Option, ) -> VirtualManifest { VirtualManifest { replace, @@ -572,6 +546,7 @@ profiles, warnings: Warnings::new(), features, + resolve_behavior, } } @@ -602,23 +577,32 @@ pub fn features(&self) -> &Features { &self.features } + + /// The style of resolver behavior to use, declared with the `resolver` field. + /// + /// Returns `None` if it is not specified. 
+ pub fn resolve_behavior(&self) -> Option { + self.resolve_behavior + } } impl Target { fn new(src_path: TargetSourcePath, edition: Edition) -> Target { Target { - kind: TargetKind::Bin, - name: String::new(), - src_path, - required_features: None, - doc: false, - doctest: false, - harness: true, - for_host: false, - proc_macro: false, - edition, - tested: true, - benched: true, + inner: Arc::new(TargetInner { + kind: TargetKind::Bin, + name: String::new(), + src_path, + required_features: None, + doc: false, + doctest: false, + harness: true, + for_host: false, + proc_macro: false, + edition, + tested: true, + benched: true, + }), } } @@ -628,17 +612,17 @@ pub fn lib_target( name: &str, - crate_targets: Vec, + crate_targets: Vec, src_path: PathBuf, edition: Edition, ) -> Target { - Target { - kind: TargetKind::Lib(crate_targets), - name: name.to_string(), - doctest: true, - doc: true, - ..Target::with_path(src_path, edition) - } + let mut target = Target::with_path(src_path, edition); + target + .set_kind(TargetKind::Lib(crate_targets)) + .set_name(name) + .set_doctest(true) + .set_doc(true); + target } pub fn bin_target( @@ -647,63 +631,59 @@ required_features: Option>, edition: Edition, ) -> Target { - Target { - kind: TargetKind::Bin, - name: name.to_string(), - required_features, - doc: true, - ..Target::with_path(src_path, edition) - } + let mut target = Target::with_path(src_path, edition); + target + .set_kind(TargetKind::Bin) + .set_name(name) + .set_required_features(required_features) + .set_doc(true); + target } /// Builds a `Target` corresponding to the `build = "build.rs"` entry. pub fn custom_build_target(name: &str, src_path: PathBuf, edition: Edition) -> Target { - Target { - kind: TargetKind::CustomBuild, - name: name.to_string(), - for_host: true, - benched: false, - tested: false, - ..Target::with_path(src_path, edition) - } + let mut target = Target::with_path(src_path, edition); + target + .set_kind(TargetKind::CustomBuild) + .set_name(name) + .set_for_host(true) + .set_benched(false) + .set_tested(false); + target } pub fn metabuild_target(name: &str) -> Target { - Target { - kind: TargetKind::CustomBuild, - name: name.to_string(), - for_host: true, - benched: false, - tested: false, - ..Target::new(TargetSourcePath::Metabuild, Edition::Edition2018) - } + let mut target = Target::new(TargetSourcePath::Metabuild, Edition::Edition2018); + target + .set_kind(TargetKind::CustomBuild) + .set_name(name) + .set_for_host(true) + .set_benched(false) + .set_tested(false); + target } pub fn example_target( name: &str, - crate_targets: Vec, + crate_targets: Vec, src_path: PathBuf, required_features: Option>, edition: Edition, ) -> Target { - let kind = if crate_targets.is_empty() - || crate_targets - .iter() - .all(|t| *t == LibKind::Other("bin".into())) + let kind = if crate_targets.is_empty() || crate_targets.iter().all(|t| *t == CrateType::Bin) { TargetKind::ExampleBin } else { TargetKind::ExampleLib(crate_targets) }; - - Target { - kind, - name: name.to_string(), - required_features, - tested: false, - benched: false, - ..Target::with_path(src_path, edition) - } + let mut target = Target::with_path(src_path, edition); + target + .set_kind(kind) + .set_name(name) + .set_required_features(required_features) + .set_tested(false) + .set_benched(false); + target } pub fn test_target( @@ -712,13 +692,13 @@ required_features: Option>, edition: Edition, ) -> Target { - Target { - kind: TargetKind::Test, - name: name.to_string(), - required_features, - benched: false, - 
..Target::with_path(src_path, edition) - } + let mut target = Target::with_path(src_path, edition); + target + .set_kind(TargetKind::Test) + .set_name(name) + .set_required_features(required_features) + .set_benched(false); + target } pub fn bench_target( @@ -727,117 +707,106 @@ required_features: Option>, edition: Edition, ) -> Target { - Target { - kind: TargetKind::Bench, - name: name.to_string(), - required_features, - tested: false, - ..Target::with_path(src_path, edition) - } + let mut target = Target::with_path(src_path, edition); + target + .set_kind(TargetKind::Bench) + .set_name(name) + .set_required_features(required_features) + .set_tested(false); + target } pub fn name(&self) -> &str { - &self.name + &self.inner.name } pub fn crate_name(&self) -> String { - self.name.replace("-", "_") + self.name().replace("-", "_") } pub fn src_path(&self) -> &TargetSourcePath { - &self.src_path + &self.inner.src_path } pub fn set_src_path(&mut self, src_path: TargetSourcePath) { - self.src_path = src_path; + Arc::make_mut(&mut self.inner).src_path = src_path; } pub fn required_features(&self) -> Option<&Vec> { - self.required_features.as_ref() + self.inner.required_features.as_ref() } pub fn kind(&self) -> &TargetKind { - &self.kind - } - pub fn kind_mut(&mut self) -> &mut TargetKind { - &mut self.kind + &self.inner.kind } pub fn tested(&self) -> bool { - self.tested + self.inner.tested } pub fn harness(&self) -> bool { - self.harness + self.inner.harness } pub fn documented(&self) -> bool { - self.doc + self.inner.doc } // A plugin, proc-macro, or build-script. pub fn for_host(&self) -> bool { - self.for_host + self.inner.for_host } pub fn proc_macro(&self) -> bool { - self.proc_macro + self.inner.proc_macro } pub fn edition(&self) -> Edition { - self.edition + self.inner.edition } pub fn benched(&self) -> bool { - self.benched + self.inner.benched } pub fn doctested(&self) -> bool { - self.doctest + self.inner.doctest } pub fn doctestable(&self) -> bool { - match self.kind { - TargetKind::Lib(ref kinds) => kinds - .iter() - .any(|k| *k == LibKind::Rlib || *k == LibKind::Lib || *k == LibKind::ProcMacro), + match self.kind() { + TargetKind::Lib(ref kinds) => kinds.iter().any(|k| { + *k == CrateType::Rlib || *k == CrateType::Lib || *k == CrateType::ProcMacro + }), _ => false, } } - pub fn allows_underscores(&self) -> bool { - self.is_bin() || self.is_example() || self.is_custom_build() - } - pub fn is_lib(&self) -> bool { - match self.kind { + match self.kind() { TargetKind::Lib(_) => true, _ => false, } } pub fn is_dylib(&self) -> bool { - match self.kind { - TargetKind::Lib(ref libs) => libs.iter().any(|l| *l == LibKind::Dylib), + match self.kind() { + TargetKind::Lib(libs) => libs.iter().any(|l| *l == CrateType::Dylib), _ => false, } } pub fn is_cdylib(&self) -> bool { - let libs = match self.kind { - TargetKind::Lib(ref libs) => libs, - _ => return false, - }; - libs.iter().any(|l| match *l { - LibKind::Other(ref s) => s == "cdylib", + match self.kind() { + TargetKind::Lib(libs) => libs.iter().any(|l| *l == CrateType::Cdylib), _ => false, - }) + } } /// Returns whether this target produces an artifact which can be linked /// into a Rust crate. /// /// This only returns true for certain kinds of libraries. 
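`Target` is now a thin handle around `Arc<TargetInner>`; the setters just below call `Arc::make_mut`, which mutates in place while the handle is unique and clones the inner data first when it is shared. A small demonstration of that copy-on-write behavior with a toy pair of types:

```rust
use std::sync::Arc;

#[derive(Clone, Debug)]
struct TargetInner {
    name: String,
    doc: bool,
}

#[derive(Clone, Debug)]
struct Target {
    inner: Arc<TargetInner>,
}

impl Target {
    fn new(name: &str) -> Target {
        Target { inner: Arc::new(TargetInner { name: name.to_string(), doc: false }) }
    }

    // Copy-on-write setter: clones `TargetInner` only if other handles
    // still point at it.
    fn set_doc(&mut self, doc: bool) -> &mut Target {
        Arc::make_mut(&mut self.inner).doc = doc;
        self
    }
}

fn main() {
    let mut a = Target::new("lib");
    let b = a.clone(); // `a` and `b` share one TargetInner

    a.set_doc(true); // forces a clone, because `b` still holds the old data

    assert!(a.inner.doc);
    assert!(!b.inner.doc); // untouched
    assert!(!Arc::ptr_eq(&a.inner, &b.inner));
    println!("a = {:?}, b = {:?}", a, b);
}
```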
- pub fn linkable(&self) -> bool { - match self.kind { - TargetKind::Lib(ref kinds) => kinds.iter().any(|k| k.linkable()), + pub fn is_linkable(&self) -> bool { + match self.kind() { + TargetKind::Lib(kinds) => kinds.iter().any(|k| k.is_linkable()), _ => false, } } pub fn is_bin(&self) -> bool { - self.kind == TargetKind::Bin + *self.kind() == TargetKind::Bin } pub fn is_example(&self) -> bool { - match self.kind { + match self.kind() { TargetKind::ExampleBin | TargetKind::ExampleLib(..) => true, _ => false, } @@ -852,82 +821,74 @@ /// Returns `true` if it is an executable example. pub fn is_exe_example(&self) -> bool { // Needed for --all-examples in contexts where only runnable examples make sense - match self.kind { + match self.kind() { TargetKind::ExampleBin => true, _ => false, } } pub fn is_test(&self) -> bool { - self.kind == TargetKind::Test + *self.kind() == TargetKind::Test } pub fn is_bench(&self) -> bool { - self.kind == TargetKind::Bench + *self.kind() == TargetKind::Bench } pub fn is_custom_build(&self) -> bool { - self.kind == TargetKind::CustomBuild + *self.kind() == TargetKind::CustomBuild } /// Returns the arguments suitable for `--crate-type` to pass to rustc. - pub fn rustc_crate_types(&self) -> Vec<&str> { - match self.kind { - TargetKind::Lib(ref kinds) | TargetKind::ExampleLib(ref kinds) => { - kinds.iter().map(LibKind::crate_type).collect() - } - TargetKind::CustomBuild - | TargetKind::Bench - | TargetKind::Test - | TargetKind::ExampleBin - | TargetKind::Bin => vec!["bin"], - } - } - - pub fn can_lto(&self) -> bool { - match self.kind { - TargetKind::Lib(ref v) => { - !v.contains(&LibKind::Rlib) - && !v.contains(&LibKind::Dylib) - && !v.contains(&LibKind::Lib) - } - _ => true, - } + pub fn rustc_crate_types(&self) -> Vec { + self.kind().rustc_crate_types() } pub fn set_tested(&mut self, tested: bool) -> &mut Target { - self.tested = tested; + Arc::make_mut(&mut self.inner).tested = tested; self } pub fn set_benched(&mut self, benched: bool) -> &mut Target { - self.benched = benched; + Arc::make_mut(&mut self.inner).benched = benched; self } pub fn set_doctest(&mut self, doctest: bool) -> &mut Target { - self.doctest = doctest; + Arc::make_mut(&mut self.inner).doctest = doctest; self } pub fn set_for_host(&mut self, for_host: bool) -> &mut Target { - self.for_host = for_host; + Arc::make_mut(&mut self.inner).for_host = for_host; self } pub fn set_proc_macro(&mut self, proc_macro: bool) -> &mut Target { - self.proc_macro = proc_macro; + Arc::make_mut(&mut self.inner).proc_macro = proc_macro; self } pub fn set_edition(&mut self, edition: Edition) -> &mut Target { - self.edition = edition; + Arc::make_mut(&mut self.inner).edition = edition; self } pub fn set_harness(&mut self, harness: bool) -> &mut Target { - self.harness = harness; + Arc::make_mut(&mut self.inner).harness = harness; self } pub fn set_doc(&mut self, doc: bool) -> &mut Target { - self.doc = doc; + Arc::make_mut(&mut self.inner).doc = doc; + self + } + pub fn set_kind(&mut self, kind: TargetKind) -> &mut Target { + Arc::make_mut(&mut self.inner).kind = kind; + self + } + pub fn set_name(&mut self, name: &str) -> &mut Target { + Arc::make_mut(&mut self.inner).name = name.to_string(); + self + } + pub fn set_required_features(&mut self, required_features: Option>) -> &mut Target { + Arc::make_mut(&mut self.inner).required_features = required_features; self } pub fn description_named(&self) -> String { - match self.kind { + match self.kind() { TargetKind::Lib(..) 
=> "lib".to_string(), TargetKind::Bin => format!("bin \"{}\"", self.name()), TargetKind::Test => format!("test \"{}\"", self.name()), @@ -942,13 +903,13 @@ impl fmt::Display for Target { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.kind { + match self.kind() { TargetKind::Lib(..) => write!(f, "Target(lib)"), - TargetKind::Bin => write!(f, "Target(bin: {})", self.name), - TargetKind::Test => write!(f, "Target(test: {})", self.name), - TargetKind::Bench => write!(f, "Target(bench: {})", self.name), + TargetKind::Bin => write!(f, "Target(bin: {})", self.name()), + TargetKind::Test => write!(f, "Target(test: {})", self.name()), + TargetKind::Bench => write!(f, "Target(bench: {})", self.name()), TargetKind::ExampleBin | TargetKind::ExampleLib(..) => { - write!(f, "Target(example: {})", self.name) + write!(f, "Target(example: {})", self.name()) } TargetKind::CustomBuild => write!(f, "Target(script)"), } diff -Nru cargo-0.44.1/src/cargo/core/mod.rs cargo-0.47.0/src/cargo/core/mod.rs --- cargo-0.44.1/src/cargo/core/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,9 +3,8 @@ enable_nightly_features, maybe_allow_nightly_features, nightly_features_allowed, }; pub use self::features::{CliUnstable, Edition, Feature, Features}; -pub use self::interning::InternedString; pub use self::manifest::{EitherManifest, VirtualManifest}; -pub use self::manifest::{LibKind, Manifest, Target, TargetKind}; +pub use self::manifest::{Manifest, Target, TargetKind}; pub use self::package::{Package, PackageSet}; pub use self::package_id::PackageId; pub use self::package_id_spec::PackageIdSpec; @@ -19,7 +18,6 @@ pub mod compiler; pub mod dependency; pub mod features; -mod interning; pub mod manifest; pub mod package; pub mod package_id; diff -Nru cargo-0.44.1/src/cargo/core/package_id.rs cargo-0.47.0/src/cargo/core/package_id.rs --- cargo-0.44.1/src/cargo/core/package_id.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/package_id.rs 2020-07-17 20:39:39.000000000 +0000 @@ -9,8 +9,8 @@ use serde::de; use serde::ser; -use crate::core::interning::InternedString; use crate::core::source::SourceId; +use crate::util::interning::InternedString; use crate::util::{CargoResult, ToSemver}; lazy_static::lazy_static! 
{ diff -Nru cargo-0.44.1/src/cargo/core/package_id_spec.rs cargo-0.47.0/src/cargo/core/package_id_spec.rs --- cargo-0.44.1/src/cargo/core/package_id_spec.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/package_id_spec.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,9 +5,9 @@ use serde::{de, ser}; use url::Url; -use crate::core::interning::InternedString; use crate::core::PackageId; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::{validate_package_name, IntoUrl, ToSemver}; /// Some or all of the data required to identify a package: @@ -274,8 +274,8 @@ #[cfg(test)] mod tests { use super::PackageIdSpec; - use crate::core::interning::InternedString; use crate::core::{PackageId, SourceId}; + use crate::util::interning::InternedString; use crate::util::ToSemver; use url::Url; diff -Nru cargo-0.44.1/src/cargo/core/package.rs cargo-0.47.0/src/cargo/core/package.rs --- cargo-0.44.1/src/cargo/core/package.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/package.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,10 +1,11 @@ use std::cell::{Cell, Ref, RefCell, RefMut}; use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeSet, HashMap, HashSet}; use std::fmt; use std::hash; use std::mem; use std::path::{Path, PathBuf}; +use std::rc::Rc; use std::time::{Duration, Instant}; use anyhow::Context; @@ -17,16 +18,33 @@ use serde::ser; use serde::Serialize; -use crate::core::interning::InternedString; +use crate::core::compiler::{CompileKind, RustcTargetData}; +use crate::core::dependency::DepKind; +use crate::core::resolver::{HasDevUnits, Resolve}; use crate::core::source::MaybePackage; use crate::core::{Dependency, Manifest, PackageId, SourceId, Target}; -use crate::core::{FeatureMap, SourceMap, Summary}; +use crate::core::{FeatureMap, SourceMap, Summary, Workspace}; use crate::ops; use crate::util::config::PackageCacheLock; use crate::util::errors::{CargoResult, CargoResultExt, HttpNot200}; +use crate::util::interning::InternedString; use crate::util::network::Retry; use crate::util::{self, internal, Config, Progress, ProgressStyle}; +pub const MANIFEST_PREAMBLE: &str = "\ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# \"normalize\" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) +"; + /// Information about a package that is available somewhere in the file system. /// /// A package is a `Cargo.toml` file plus all the files that are part of it. @@ -34,6 +52,11 @@ // TODO: is `manifest_path` a relic? #[derive(Clone)] pub struct Package { + inner: Rc, +} + +#[derive(Clone)] +struct PackageInner { /// The package's manifest. manifest: Manifest, /// The root of the package. 
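// The same handle-plus-inner layout, sketched with Rc to mirror the
// Package/PackageInner split that begins above; Rc is assumed to be enough
// here because the value stays on one thread. The point of this variant is
// that cloning the handle only copies a pointer, and the shared data is
// duplicated lazily when a mutation happens while it is still shared. `Pkg`
// and `ManifestData` are illustrative names, not cargo's.
use std::rc::Rc;

#[derive(Clone)]
struct ManifestData {
    name: String,
}

#[derive(Clone)]
struct Pkg {
    inner: Rc<ManifestData>,
}

impl Pkg {
    fn new(name: &str) -> Pkg {
        Pkg {
            inner: Rc::new(ManifestData {
                name: name.to_string(),
            }),
        }
    }

    // Read access borrows through the shared pointer.
    fn name(&self) -> &str {
        &self.inner.name
    }

    // Mutable access forces a private copy if the data is still shared.
    fn name_mut(&mut self) -> &mut String {
        &mut Rc::make_mut(&mut self.inner).name
    }
}

fn main() {
    let a = Pkg::new("demo");
    let mut b = a.clone();
    assert!(Rc::ptr_eq(&a.inner, &b.inner)); // the clone shared the storage
    b.name_mut().push_str("-fork"); // this mutation triggers the copy
    assert!(!Rc::ptr_eq(&a.inner, &b.inner));
    assert_eq!(a.name(), "demo");
    assert_eq!(b.name(), "demo-fork");
}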
@@ -84,22 +107,22 @@ where S: ser::Serializer, { - let summary = self.manifest.summary(); + let summary = self.manifest().summary(); let package_id = summary.package_id(); - let manmeta = self.manifest.metadata(); - let license = manmeta.license.as_ref().map(String::as_ref); - let license_file = manmeta.license_file.as_ref().map(String::as_ref); - let description = manmeta.description.as_ref().map(String::as_ref); + let manmeta = self.manifest().metadata(); + let license = manmeta.license.as_deref(); + let license_file = manmeta.license_file.as_deref(); + let description = manmeta.description.as_deref(); let authors = manmeta.authors.as_ref(); let categories = manmeta.categories.as_ref(); let keywords = manmeta.keywords.as_ref(); - let readme = manmeta.readme.as_ref().map(String::as_ref); - let repository = manmeta.repository.as_ref().map(String::as_ref); + let readme = manmeta.readme.as_deref(); + let repository = manmeta.repository.as_deref(); // Filter out metabuild targets. They are an internal implementation // detail that is probably not relevant externally. There's also not a // real path to show in `src_path`, and this avoids changing the format. let targets: Vec<&Target> = self - .manifest + .manifest() .targets() .iter() .filter(|t| t.src_path().is_path()) @@ -116,16 +139,16 @@ dependencies: summary.dependencies(), targets, features: summary.features(), - manifest_path: &self.manifest_path, - metadata: self.manifest.custom_metadata(), + manifest_path: self.manifest_path(), + metadata: self.manifest().custom_metadata(), authors, categories, keywords, readme, repository, - edition: &self.manifest.edition().to_string(), - links: self.manifest.links(), - metabuild: self.manifest.metabuild(), + edition: &self.manifest().edition().to_string(), + links: self.manifest().links(), + metabuild: self.manifest().metabuild(), publish: self.publish().as_ref(), } .serialize(s) @@ -136,26 +159,28 @@ /// Creates a package from a manifest and its location. pub fn new(manifest: Manifest, manifest_path: &Path) -> Package { Package { - manifest, - manifest_path: manifest_path.to_path_buf(), + inner: Rc::new(PackageInner { + manifest, + manifest_path: manifest_path.to_path_buf(), + }), } } /// Gets the manifest dependencies. pub fn dependencies(&self) -> &[Dependency] { - self.manifest.dependencies() + self.manifest().dependencies() } /// Gets the manifest. pub fn manifest(&self) -> &Manifest { - &self.manifest + &self.inner.manifest } /// Gets the manifest. pub fn manifest_mut(&mut self) -> &mut Manifest { - &mut self.manifest + &mut Rc::make_mut(&mut self.inner).manifest } /// Gets the path to the manifest. pub fn manifest_path(&self) -> &Path { - &self.manifest_path + &self.inner.manifest_path } /// Gets the name of the package. pub fn name(&self) -> InternedString { @@ -163,19 +188,19 @@ } /// Gets the `PackageId` object for the package (fully defines a package). pub fn package_id(&self) -> PackageId { - self.manifest.package_id() + self.manifest().package_id() } /// Gets the root folder of the package. pub fn root(&self) -> &Path { - self.manifest_path.parent().unwrap() + self.manifest_path().parent().unwrap() } /// Gets the summary for the package. pub fn summary(&self) -> &Summary { - self.manifest.summary() + self.manifest().summary() } /// Gets the targets specified in the manifest. pub fn targets(&self) -> &[Target] { - self.manifest.targets() + self.manifest().targets() } /// Gets the current package version. 
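// A small sketch of the Option::as_deref change in the serialization hunk
// above: for an Option<String>, as_deref() borrows the contents as
// Option<&str>, which is what the longer .as_ref().map(...) chain spelled out
// by hand. The values below are placeholders.
fn main() {
    let license: Option<String> = Some("MIT OR Apache-2.0".to_string());
    let by_hand: Option<&str> = license.as_ref().map(|s| s.as_str());
    let with_as_deref: Option<&str> = license.as_deref();
    assert_eq!(by_hand, with_as_deref);

    let missing: Option<String> = None;
    assert_eq!(missing.as_deref(), None::<&str>);
}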
pub fn version(&self) -> &Version { @@ -183,11 +208,15 @@ } /// Gets the package authors. pub fn authors(&self) -> &Vec { - &self.manifest.metadata().authors + &self.manifest().metadata().authors } /// Returns `true` if the package is set to publish. pub fn publish(&self) -> &Option> { - self.manifest.publish() + self.manifest().publish() + } + /// Returns `true` if this package is a proc-macro. + pub fn proc_macro(&self) -> bool { + self.targets().iter().any(|target| target.proc_macro()) } /// Returns `true` if the package uses a custom build script for any target. @@ -197,34 +226,20 @@ pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Package { Package { - manifest: self.manifest.map_source(to_replace, replace_with), - manifest_path: self.manifest_path, + inner: Rc::new(PackageInner { + manifest: self.manifest().clone().map_source(to_replace, replace_with), + manifest_path: self.manifest_path().to_owned(), + }), } } - pub fn to_registry_toml(&self, config: &Config) -> CargoResult { + pub fn to_registry_toml(&self, ws: &Workspace<'_>) -> CargoResult { let manifest = self .manifest() .original() - .prepare_for_publish(config, self.root())?; + .prepare_for_publish(ws, self.root())?; let toml = toml::to_string(&manifest)?; - Ok(format!( - "# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO\n\ - #\n\ - # When uploading crates to the registry Cargo will automatically\n\ - # \"normalize\" Cargo.toml files for maximal compatibility\n\ - # with all versions of Cargo and also rewrite `path` dependencies\n\ - # to registry (e.g., crates.io) dependencies\n\ - #\n\ - # If you believe there's an error in this file please file an\n\ - # issue against the rust-lang/cargo repository. If you're\n\ - # editing this file be aware that the upstream Cargo.toml\n\ - # will likely look very different (and much more reasonable)\n\ - \n\ - {}\ - ", - toml - )) + Ok(format!("{}\n{}", MANIFEST_PREAMBLE, toml)) } /// Returns if package should include `Cargo.lock`. @@ -432,6 +447,9 @@ } pub fn get_one(&self, id: PackageId) -> CargoResult<&Package> { + if let Some(pkg) = self.packages.get(&id).and_then(|slot| slot.borrow()) { + return Ok(pkg); + } Ok(self.get_many(Some(id))?.remove(0)) } @@ -448,6 +466,77 @@ Ok(pkgs) } + /// Downloads any packages accessible from the give root ids. + pub fn download_accessible( + &self, + resolve: &Resolve, + root_ids: &[PackageId], + has_dev_units: HasDevUnits, + requested_kinds: &[CompileKind], + target_data: &RustcTargetData, + ) -> CargoResult<()> { + fn collect_used_deps( + used: &mut BTreeSet, + resolve: &Resolve, + pkg_id: PackageId, + has_dev_units: HasDevUnits, + requested_kinds: &[CompileKind], + target_data: &RustcTargetData, + ) -> CargoResult<()> { + if !used.insert(pkg_id) { + return Ok(()); + } + let filtered_deps = resolve.deps(pkg_id).filter(|&(_id, deps)| { + deps.iter().any(|dep| { + if dep.kind() == DepKind::Development && has_dev_units == HasDevUnits::No { + return false; + } + // This is overly broad, since not all target-specific + // dependencies are used both for target and host. To tighten this + // up, this function would need to track "for_host" similar to how + // unit dependencies handles it. 
+ let activated = requested_kinds + .iter() + .chain(Some(&CompileKind::Host)) + .any(|kind| target_data.dep_platform_activated(dep, *kind)); + if !activated { + return false; + } + true + }) + }); + for (dep_id, _deps) in filtered_deps { + collect_used_deps( + used, + resolve, + dep_id, + has_dev_units, + requested_kinds, + target_data, + )?; + } + Ok(()) + } + + // This is sorted by PackageId to get consistent behavior and error + // messages for Cargo's testsuite. Perhaps there is a better ordering + // that optimizes download time? + let mut to_download = BTreeSet::new(); + + for id in root_ids { + collect_used_deps( + &mut to_download, + resolve, + *id, + has_dev_units, + requested_kinds, + target_data, + )?; + } + self.get_many(to_download.into_iter())?; + Ok(()) + } + pub fn sources(&self) -> Ref<'_, SourceMap<'cfg>> { self.sources.borrow() } @@ -467,15 +556,6 @@ let other_sources = set.sources.into_inner(); sources.add_source_map(other_sources); } - - /// Get mutable access to an already downloaded package, if it's already - /// downoaded and it's part of this set. Does not actually attempt to - /// download anything if it's not already downloaded. - pub fn lookup_mut(&mut self, id: PackageId) -> Option<&mut Package> { - self.packages - .get_mut(&id) - .and_then(|cell| cell.borrow_mut()) - } } // When dynamically linked against libcurl, we want to ignore some failures @@ -655,7 +735,7 @@ .pending .remove(&token) .expect("got a token for a non-in-progress transfer"); - let data = mem::replace(&mut *dl.data.borrow_mut(), Vec::new()); + let data = mem::take(&mut *dl.data.borrow_mut()); let mut handle = self.set.multi.remove(handle)?; self.pending_ids.remove(&dl.id); @@ -855,7 +935,7 @@ } // If we've spent too long not actually receiving any data we time out. - if now - self.updated_at.get() > self.timeout.dur { + if now > self.updated_at.get() + self.timeout.dur { self.updated_at.set(now); let msg = format!( "failed to download any data for `{}` within {}s", diff -Nru cargo-0.44.1/src/cargo/core/profiles.rs cargo-0.47.0/src/cargo/core/profiles.rs --- cargo-0.44.1/src/cargo/core/profiles.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/profiles.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,8 +1,8 @@ use crate::core::compiler::CompileMode; -use crate::core::interning::InternedString; use crate::core::resolver::features::FeaturesFor; use crate::core::{Feature, Features, PackageId, PackageIdSpec, Resolve, Shell}; use crate::util::errors::CargoResultExt; +use crate::util::interning::InternedString; use crate::util::toml::{ProfilePackageSpec, StringOrBool, TomlProfile, TomlProfiles, U32OrBool}; use crate::util::{closest_msg, config, CargoResult, Config}; use anyhow::bail; @@ -287,6 +287,7 @@ &self, pkg_id: PackageId, is_member: bool, + is_local: bool, unit_for: UnitFor, mode: CompileMode, ) -> Profile { @@ -360,7 +361,7 @@ // itself (aka crates.io / git dependencies) // // (see also https://github.com/rust-lang/cargo/issues/3972) - if !pkg_id.source_id().is_path() { + if !is_local { profile.incremental = false; } profile.name = profile_name; @@ -374,6 +375,7 @@ /// times). 
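// A simplified, self-contained sketch of the traversal that
// download_accessible/collect_used_deps perform in the package.rs hunk above:
// walk dependency edges outward from a set of roots, let a BTreeSet both
// de-duplicate nodes and give a stable (sorted) order, and skip edges that a
// caller-supplied filter rejects. The u32 node ids and the filter closure are
// stand-ins for PackageId and the dev-unit/platform checks.
use std::collections::{BTreeSet, HashMap};

fn collect_reachable(
    graph: &HashMap<u32, Vec<u32>>,
    roots: &[u32],
    keep_edge: &dyn Fn(u32, u32) -> bool,
) -> BTreeSet<u32> {
    fn visit(
        graph: &HashMap<u32, Vec<u32>>,
        keep_edge: &dyn Fn(u32, u32) -> bool,
        used: &mut BTreeSet<u32>,
        node: u32,
    ) {
        // Stop once a node has been collected; this also breaks cycles.
        if !used.insert(node) {
            return;
        }
        for &dep in graph.get(&node).map(|v| v.as_slice()).unwrap_or(&[]) {
            if keep_edge(node, dep) {
                visit(graph, keep_edge, used, dep);
            }
        }
    }

    let mut used = BTreeSet::new();
    for &root in roots {
        visit(graph, keep_edge, &mut used, root);
    }
    used
}

fn main() {
    // 1 -> {2, 3}, 2 -> {4}, 3 -> {4}, 4 -> {1} (a cycle back to the root).
    let mut graph: HashMap<u32, Vec<u32>> = HashMap::new();
    graph.insert(1, vec![2, 3]);
    graph.insert(2, vec![4]);
    graph.insert(3, vec![4]);
    graph.insert(4, vec![1]);

    // Keep every edge except those leading into node 3.
    let reachable = collect_reachable(&graph, &[1], &|_from, to| to != 3);
    assert_eq!(reachable.into_iter().collect::<Vec<_>>(), vec![1, 2, 4]);
}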
pub fn get_profile_run_custom_build(&self, for_unit_profile: &Profile) -> Profile { let mut result = Profile::default(); + result.name = for_unit_profile.name; result.root = for_unit_profile.root; result.debuginfo = for_unit_profile.debuginfo; result.opt_level = for_unit_profile.opt_level; @@ -531,6 +533,9 @@ } match toml.lto { Some(StringOrBool::Bool(b)) => profile.lto = Lto::Bool(b), + Some(StringOrBool::String(ref n)) if matches!(n.as_str(), "off" | "n" | "no") => { + profile.lto = Lto::Off + } Some(StringOrBool::String(ref n)) => profile.lto = Lto::Named(InternedString::new(n)), None => {} } @@ -563,6 +568,9 @@ if let Some(incremental) = toml.incremental { profile.incremental = incremental; } + if let Some(strip) = toml.strip { + profile.strip = strip; + } } /// The root profile (dev/release). @@ -578,10 +586,11 @@ /// Profile settings used to determine which compiler flags to use for a /// target. -#[derive(Clone, Copy, Eq, PartialOrd, Ord)] +#[derive(Clone, Copy, Eq, PartialOrd, Ord, serde::Serialize)] pub struct Profile { pub name: InternedString, pub opt_level: InternedString, + #[serde(skip)] // named profiles are unstable pub root: ProfileRoot, pub lto: Lto, // `None` means use rustc default. @@ -592,6 +601,7 @@ pub rpath: bool, pub incremental: bool, pub panic: PanicStrategy, + pub strip: Strip, } impl Default for Profile { @@ -608,6 +618,7 @@ rpath: false, incremental: false, panic: PanicStrategy::Unwind, + strip: Strip::None, } } } @@ -632,6 +643,7 @@ rpath incremental panic + strip )] } } @@ -718,6 +730,7 @@ bool, bool, PanicStrategy, + Strip, ) { ( self.opt_level, @@ -729,6 +742,7 @@ self.rpath, self.incremental, self.panic, + self.strip, ) } } @@ -736,15 +750,31 @@ /// The link-time-optimization setting. #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)] pub enum Lto { - /// False = no LTO + /// Explicitly no LTO, disables thin-LTO. + Off, /// True = "Fat" LTO + /// False = rustc default (no args), currently "thin LTO" Bool(bool), /// Named LTO settings like "thin". Named(InternedString), } +impl serde::ser::Serialize for Lto { + fn serialize(&self, s: S) -> Result + where + S: serde::ser::Serializer, + { + match self { + Lto::Off => "off".serialize(s), + Lto::Bool(b) => b.to_string().serialize(s), + Lto::Named(n) => n.serialize(s), + } + } +} + /// The `panic` setting. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, PartialOrd, Ord, serde::Serialize)] +#[serde(rename_all = "lowercase")] pub enum PanicStrategy { Unwind, Abort, @@ -760,6 +790,30 @@ } } +/// The setting for choosing which symbols to strip +#[derive( + Clone, Copy, PartialEq, Eq, Debug, Hash, PartialOrd, Ord, serde::Serialize, serde::Deserialize, +)] +#[serde(rename_all = "lowercase")] +pub enum Strip { + /// Only strip debugging symbols + DebugInfo, + /// Don't remove any symbols + None, + /// Strip all non-exported symbols from the final binary + Symbols, +} + +impl fmt::Display for Strip { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Strip::DebugInfo => "debuginfo", + Strip::None => "none", + Strip::Symbols => "symbols", + } + .fmt(f) + } +} /// Flags used in creating `Unit`s to indicate the purpose for the target, and /// to ensure the target's dependencies have the correct settings. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] @@ -768,7 +822,7 @@ /// any of its dependencies. This enables `build-override` profiles for /// these targets. 
/// - /// An invariant is that if `build_dep` is true, `host` must be true. + /// An invariant is that if `host_features` is true, `host` must be true. /// /// Note that this is `true` for `RunCustomBuild` units, even though that /// unit should *not* use build-override profiles. This is a bit of a @@ -779,16 +833,16 @@ /// sticky (and forced to `true` for all further dependencies) — which is /// the whole point of `UnitFor`. host: bool, - /// A target for a build dependency (or any of its dependencies). This is - /// used for computing features of build dependencies independently of - /// other dependency kinds. + /// A target for a build dependency or proc-macro (or any of its + /// dependencies). This is used for computing features of build + /// dependencies and proc-macros independently of other dependency kinds. /// /// The subtle difference between this and `host` is that the build script /// for a non-host package sets this to `false` because it wants the /// features of the non-host package (whereas `host` is true because the - /// build script is being built for the host). `build_dep` becomes `true` - /// for build-dependencies, or any of their dependencies. For example, with - /// this dependency tree: + /// build script is being built for the host). `host_features` becomes + /// `true` for build-dependencies or proc-macros, or any of their + /// dependencies. For example, with this dependency tree: /// /// ```text /// foo @@ -799,17 +853,18 @@ /// └── shared_dep build.rs /// ``` /// - /// In this example, `foo build.rs` is HOST=true, BUILD_DEP=false. This is - /// so that `foo build.rs` gets the profile settings for build scripts - /// (HOST=true) and features of foo (BUILD_DEP=false) because build scripts - /// need to know which features their package is being built with. + /// In this example, `foo build.rs` is HOST=true, HOST_FEATURES=false. + /// This is so that `foo build.rs` gets the profile settings for build + /// scripts (HOST=true) and features of foo (HOST_FEATURES=false) because + /// build scripts need to know which features their package is being built + /// with. /// /// But in the case of `shared_dep`, when built as a build dependency, /// both flags are true (it only wants the build-dependency features). /// When `shared_dep` is built as a normal dependency, then `shared_dep - /// build.rs` is HOST=true, BUILD_DEP=false for the same reasons that + /// build.rs` is HOST=true, HOST_FEATURES=false for the same reasons that /// foo's build script is set that way. - build_dep: bool, + host_features: bool, /// How Cargo processes the `panic` setting or profiles. This is done to /// handle test/benches inheriting from dev/release, as well as forcing /// `for_host` units to always unwind. @@ -837,32 +892,35 @@ pub fn new_normal() -> UnitFor { UnitFor { host: false, - build_dep: false, + host_features: false, panic_setting: PanicSetting::ReadProfile, } } - /// A unit for a custom build script or its dependencies. + /// A unit for a custom build script or proc-macro or its dependencies. /// - /// The `build_dep` parameter is whether or not this is for a build - /// dependency. Build scripts for non-host units should use `false` - /// because they want to use the features of the package they are running - /// for. - pub fn new_build(build_dep: bool) -> UnitFor { + /// The `host_features` parameter is whether or not this is for a build + /// dependency or proc-macro (something that requires being built "on the + /// host"). 
Build scripts for non-host units should use `false` because + /// they want to use the features of the package they are running for. + pub fn new_host(host_features: bool) -> UnitFor { UnitFor { host: true, - build_dep, + host_features, // Force build scripts to always use `panic=unwind` for now to // maximally share dependencies with procedural macros. panic_setting: PanicSetting::AlwaysUnwind, } } - /// A unit for a proc macro or compiler plugin or their dependencies. + /// A unit for a compiler plugin or their dependencies. pub fn new_compiler() -> UnitFor { UnitFor { host: false, - build_dep: false, + // The feature resolver doesn't know which dependencies are + // plugins, so for now plugins don't split features. Since plugins + // are mostly deprecated, just leave this as false. + host_features: false, // Force plugins to use `panic=abort` so panics in the compiler do // not abort the process but instead end with a reasonable error // message that involves catching the panic in the compiler. @@ -879,7 +937,7 @@ pub fn new_test(config: &Config) -> UnitFor { UnitFor { host: false, - build_dep: false, + host_features: false, // We're testing out an unstable feature (`-Zpanic-abort-tests`) // which inherits the panic setting from the dev/release profile // (basically avoid recompiles) but historical defaults required @@ -902,7 +960,7 @@ pub fn with_for_host(self, for_host: bool) -> UnitFor { UnitFor { host: self.host || for_host, - build_dep: self.build_dep, + host_features: self.host_features, panic_setting: if for_host { PanicSetting::AlwaysUnwind } else { @@ -911,15 +969,16 @@ } } - /// Returns a new copy updating it for a build dependency. + /// Returns a new copy updating it whether or not it should use features + /// for build dependencies and proc-macros. /// /// This is part of the machinery responsible for handling feature /// decoupling for build dependencies in the new feature resolver. - pub fn with_build_dep(mut self, build_dep: bool) -> UnitFor { - if build_dep { + pub fn with_host_features(mut self, host_features: bool) -> UnitFor { + if host_features { assert!(self.host); } - self.build_dep = self.build_dep || build_dep; + self.host_features = self.host_features || host_features; self } @@ -929,8 +988,8 @@ self.host } - pub fn is_for_build_dep(&self) -> bool { - self.build_dep + pub fn is_for_host_features(&self) -> bool { + self.host_features } /// Returns how `panic` settings should be handled for this profile @@ -943,34 +1002,34 @@ static ALL: &[UnitFor] = &[ UnitFor { host: false, - build_dep: false, + host_features: false, panic_setting: PanicSetting::ReadProfile, }, UnitFor { host: true, - build_dep: false, + host_features: false, panic_setting: PanicSetting::AlwaysUnwind, }, UnitFor { host: false, - build_dep: false, + host_features: false, panic_setting: PanicSetting::AlwaysUnwind, }, UnitFor { host: false, - build_dep: false, + host_features: false, panic_setting: PanicSetting::Inherit, }, - // build_dep=true must always have host=true + // host_features=true must always have host=true // `Inherit` is not used in build dependencies. 
UnitFor { host: true, - build_dep: true, + host_features: true, panic_setting: PanicSetting::ReadProfile, }, UnitFor { host: true, - build_dep: true, + host_features: true, panic_setting: PanicSetting::AlwaysUnwind, }, ]; @@ -978,11 +1037,7 @@ } pub(crate) fn map_to_features_for(&self) -> FeaturesFor { - if self.is_for_build_dep() { - FeaturesFor::BuildDep - } else { - FeaturesFor::NormalOrDev - } + FeaturesFor::from_for_host(self.is_for_host_features()) } } diff -Nru cargo-0.44.1/src/cargo/core/registry.rs cargo-0.47.0/src/cargo/core/registry.rs --- cargo-0.44.1/src/cargo/core/registry.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/registry.rs 2020-07-17 20:39:39.000000000 +0000 @@ -9,6 +9,7 @@ use crate::core::{Dependency, PackageId, Source, SourceId, SourceMap, Summary}; use crate::sources::config::SourceConfigMap; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::{profile, CanonicalUrl, Config}; /// Source of information about a group of packages. @@ -91,16 +92,13 @@ type LockedMap = HashMap< // The first level of key-ing done in this hash map is the source that // dependencies come from, identified by a `SourceId`. - SourceId, - HashMap< - // This next level is keyed by the name of the package... - String, - // ... and the value here is a list of tuples. The first element of each - // tuple is a package which has the source/name used to get to this - // point. The second element of each tuple is the list of locked - // dependencies that the first element has. - Vec<(PackageId, Vec)>, - >, + // The next level is keyed by the name of the package... + (SourceId, InternedString), + // ... and the value here is a list of tuples. The first element of each + // tuple is a package which has the source/name used to get to this + // point. The second element of each tuple is the list of locked + // dependencies that the first element has. + Vec<(PackageId, Vec)>, >; #[derive(PartialEq, Eq, Clone, Copy)] @@ -198,17 +196,20 @@ self.yanked_whitelist.extend(pkgs); } + /// remove all residual state from previous lock files. + pub fn clear_lock(&mut self) { + trace!("clear_lock"); + self.locked = HashMap::new(); + } + pub fn register_lock(&mut self, id: PackageId, deps: Vec) { trace!("register_lock: {}", id); for dep in deps.iter() { trace!("\t-> {}", dep); } - let sub_map = self + let sub_vec = self .locked - .entry(id.source_id()) - .or_insert_with(HashMap::new); - let sub_vec = sub_map - .entry(id.name().to_string()) + .entry((id.source_id(), id.name())) .or_insert_with(Vec::new); sub_vec.push((id, deps)); } @@ -222,16 +223,35 @@ /// the manifest. /// /// Here the `deps` will be resolved to a precise version and stored - /// internally for future calls to `query` below. It's expected that `deps` - /// have had `lock_to` call already, if applicable. (e.g., if a lock file was - /// already present). + /// internally for future calls to `query` below. `deps` should be a tuple + /// where the first element is the patch definition straight from the + /// manifest, and the second element is an optional variant where the + /// patch has been locked. This locked patch is the patch locked to + /// a specific version found in Cargo.lock. This will be `None` if + /// `Cargo.lock` doesn't exist, or the patch did not match any existing + /// entries in `Cargo.lock`. 
/// /// Note that the patch list specified here *will not* be available to /// `query` until `lock_patches` is called below, which should be called /// once all patches have been added. - pub fn patch(&mut self, url: &Url, deps: &[Dependency]) -> CargoResult<()> { + /// + /// The return value is a `Vec` of patches that should *not* be locked. + /// This happens when the patch is locked, but the patch has been updated + /// so the locked value is no longer correct. + pub fn patch( + &mut self, + url: &Url, + deps: &[(&Dependency, Option<(Dependency, PackageId)>)], + ) -> CargoResult> { + // NOTE: None of this code is aware of required features. If a patch + // is missing a required feature, you end up with an "unused patch" + // warning, which is very hard to understand. Ideally the warning + // would be tailored to indicate *why* it is unused. let canonical = CanonicalUrl::new(url)?; + // Return value of patches that shouldn't be locked. + let mut unlock_patches = Vec::new(); + // First up we need to actually resolve each `deps` specification to // precisely one summary. We're not using the `query` method below as it // internally uses maps we're building up as part of this method @@ -243,7 +263,15 @@ // of summaries which should be the same length as `deps` above. let unlocked_summaries = deps .iter() - .map(|dep| { + .map(|(orig_patch, locked)| { + // Remove double reference in orig_patch. Is there maybe a + // magic pattern that could avoid this? + let orig_patch = *orig_patch; + // Use the locked patch if it exists, otherwise use the original. + let dep = match locked { + Some((locked_patch, _locked_id)) => locked_patch, + None => orig_patch, + }; debug!( "registering a patch for `{}` with `{}`", url, @@ -261,30 +289,27 @@ ) })?; - let mut summaries = self + let source = self .sources .get_mut(dep.source_id()) - .expect("loaded source not present") - .query_vec(dep)? - .into_iter(); - - let summary = match summaries.next() { - Some(summary) => summary, - None => anyhow::bail!( - "patch for `{}` in `{}` did not resolve to any crates. 
If this is \ - unexpected, you may wish to consult: \ - https://github.com/rust-lang/cargo/issues/4678", - dep.package_name(), - url - ), - }; - if summaries.next().is_some() { - anyhow::bail!( - "patch for `{}` in `{}` resolved to more than one candidate", - dep.package_name(), - url - ) + .expect("loaded source not present"); + let summaries = source.query_vec(dep)?; + let (summary, should_unlock) = + summary_for_patch(orig_patch, locked, summaries, source).chain_err(|| { + format!( + "patch for `{}` in `{}` failed to resolve", + orig_patch.package_name(), + url, + ) + })?; + debug!( + "patch summary is {:?} should_unlock={:?}", + summary, should_unlock + ); + if let Some(unlock_id) = should_unlock { + unlock_patches.push((orig_patch.clone(), unlock_id)); } + if *summary.package_id().source_id().canonical_url() == canonical { anyhow::bail!( "patch for `{}` in `{}` points to the same source, but \ @@ -321,7 +346,7 @@ self.patches_available.insert(canonical.clone(), ids); self.patches.insert(canonical, unlocked_summaries); - Ok(()) + Ok(unlock_patches) } /// Lock all patch summaries added via `patch`, making them available to @@ -335,6 +360,7 @@ assert!(!self.patches_locked); for summaries in self.patches.values_mut() { for summary in summaries { + debug!("locking patch {:?}", summary); *summary = lock(&self.locked, &self.patches_available, summary.clone()); } } @@ -614,15 +640,14 @@ summary: Summary, ) -> Summary { let pair = locked - .get(&summary.source_id()) - .and_then(|map| map.get(&*summary.name())) + .get(&(summary.source_id(), summary.name())) .and_then(|vec| vec.iter().find(|&&(id, _)| id == summary.package_id())); trace!("locking summary of {}", summary.package_id()); // Lock the summary's ID if possible let summary = match pair { - Some((precise, _)) => summary.override_id(precise.clone()), + Some((precise, _)) => summary.override_id(*precise), None => summary, }; summary.map_dependencies(|dep| { @@ -704,8 +729,7 @@ // all known locked packages to see if they match this dependency. // If anything does then we lock it to that and move on. let v = locked - .get(&dep.source_id()) - .and_then(|map| map.get(&*dep.package_name())) + .get(&(dep.source_id(), dep.package_name())) .and_then(|vec| vec.iter().find(|&&(id, _)| dep.matches_id(id))); if let Some(&(id, _)) = v { trace!("\tsecond hit on {}", id); @@ -718,3 +742,97 @@ dep }) } + +/// This is a helper for selecting the summary, or generating a helpful error message. +fn summary_for_patch( + orig_patch: &Dependency, + locked: &Option<(Dependency, PackageId)>, + mut summaries: Vec, + source: &mut dyn Source, +) -> CargoResult<(Summary, Option)> { + if summaries.len() == 1 { + return Ok((summaries.pop().unwrap(), None)); + } + if summaries.len() > 1 { + // TODO: In the future, it might be nice to add all of these + // candidates so that version selection would just pick the + // appropriate one. However, as this is currently structured, if we + // added these all as patches, the unselected versions would end up in + // the "unused patch" listing, and trigger a warning. It might take a + // fair bit of restructuring to make that work cleanly, and there + // isn't any demand at this time to support that. 
+ let mut vers: Vec<_> = summaries.iter().map(|summary| summary.version()).collect(); + vers.sort(); + let versions: Vec<_> = vers.into_iter().map(|v| v.to_string()).collect(); + anyhow::bail!( + "patch for `{}` in `{}` resolved to more than one candidate\n\ + Found versions: {}\n\ + Update the patch definition to select only one package.\n\ + For example, add an `=` version requirement to the patch definition, \ + such as `version = \"={}\"`.", + orig_patch.package_name(), + orig_patch.source_id(), + versions.join(", "), + versions.last().unwrap() + ); + } + assert!(summaries.is_empty()); + // No summaries found, try to help the user figure out what is wrong. + if let Some((_locked_patch, locked_id)) = locked { + // Since the locked patch did not match anything, try the unlocked one. + let orig_matches = source.query_vec(orig_patch).unwrap_or_else(|e| { + log::warn!( + "could not determine unlocked summaries for dep {:?}: {:?}", + orig_patch, + e + ); + Vec::new() + }); + let (summary, _) = summary_for_patch(orig_patch, &None, orig_matches, source)?; + // The unlocked version found a match. This returns a value to + // indicate that this entry should be unlocked. + return Ok((summary, Some(*locked_id))); + } + // Try checking if there are *any* packages that match this by name. + let name_only_dep = Dependency::new_override(orig_patch.package_name(), orig_patch.source_id()); + let name_summaries = source.query_vec(&name_only_dep).unwrap_or_else(|e| { + log::warn!( + "failed to do name-only summary query for {:?}: {:?}", + name_only_dep, + e + ); + Vec::new() + }); + let mut vers = name_summaries + .iter() + .map(|summary| summary.version()) + .collect::>(); + let found = match vers.len() { + 0 => format!(""), + 1 => format!("version `{}`", vers[0]), + _ => { + vers.sort(); + let strs: Vec<_> = vers.into_iter().map(|v| v.to_string()).collect(); + format!("versions `{}`", strs.join(", ")) + } + }; + if found.is_empty() { + anyhow::bail!( + "The patch location `{}` does not appear to contain any packages \ + matching the name `{}`.", + orig_patch.source_id(), + orig_patch.package_name() + ); + } else { + anyhow::bail!( + "The patch location `{}` contains a `{}` package with {}, but the patch \ + definition requires `{}`.\n\ + Check that the version in the patch location is what you expect, \ + and update the patch definition to match.", + orig_patch.source_id(), + orig_patch.package_name(), + found, + orig_patch.version_req() + ); + } +} diff -Nru cargo-0.44.1/src/cargo/core/resolver/conflict_cache.rs cargo-0.47.0/src/cargo/core/resolver/conflict_cache.rs --- cargo-0.44.1/src/cargo/core/resolver/conflict_cache.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/conflict_cache.rs 2020-07-17 20:39:39.000000000 +0000 @@ -175,7 +175,7 @@ dep: &Dependency, must_contain: Option, ) -> Option<&ConflictMap> { - let out = self.find(dep, &|id| cx.is_active(id), must_contain, std::usize::MAX); + let out = self.find(dep, &|id| cx.is_active(id), must_contain, usize::MAX); if cfg!(debug_assertions) { if let Some(c) = &out { assert!(cx.is_conflicting(None, c).is_some()); @@ -213,7 +213,7 @@ for c in con.keys() { self.dep_from_pid - .entry(c.clone()) + .entry(*c) .or_insert_with(HashSet::new) .insert(dep.clone()); } diff -Nru cargo-0.44.1/src/cargo/core/resolver/context.rs cargo-0.47.0/src/cargo/core/resolver/context.rs --- cargo-0.44.1/src/cargo/core/resolver/context.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/context.rs 2020-07-17 
20:39:39.000000000 +0000 @@ -1,12 +1,11 @@ use std::collections::HashMap; use std::num::NonZeroU64; -use std::rc::Rc; use anyhow::format_err; use log::debug; -use crate::core::interning::InternedString; use crate::core::{Dependency, PackageId, SourceId, Summary}; +use crate::util::interning::InternedString; use crate::util::Graph; use super::dep_cache::RegistryQueryer; @@ -35,7 +34,7 @@ /// a way to look up for a package in activations what packages required it /// and all of the exact deps that it fulfilled. - pub parents: Graph>>, + pub parents: Graph>, } /// When backtracking it can be useful to know how far back to go. @@ -255,8 +254,8 @@ .collect() } - pub fn graph(&self) -> Graph> { - let mut graph: Graph> = Graph::new(); + pub fn graph(&self) -> Graph> { + let mut graph: Graph> = Graph::new(); self.activations .values() .for_each(|(r, _)| graph.add(r.package_id())); @@ -265,14 +264,14 @@ for (o, e) in self.parents.edges(i) { let old_link = graph.link(*o, *i); assert!(old_link.is_empty()); - *old_link = e.to_vec(); + *old_link = e.iter().cloned().collect(); } } graph } } -impl Graph>> { +impl Graph> { pub fn parents_of(&self, p: PackageId) -> impl Iterator + '_ { self.edges(&p) .map(|(grand, d)| (*grand, d.iter().any(|x| x.is_public()))) @@ -338,7 +337,7 @@ parent_pid: PackageId, is_public: bool, age: ContextAge, - parents: &Graph>>, + parents: &Graph>, ) { // one tricky part is that `candidate_pid` may already be active and // have public dependencies of its own. So we not only need to mark @@ -383,7 +382,7 @@ b_id: PackageId, parent: PackageId, is_public: bool, - parents: &Graph>>, + parents: &Graph>, ) -> Result< (), ( diff -Nru cargo-0.44.1/src/cargo/core/resolver/dep_cache.rs cargo-0.47.0/src/cargo/core/resolver/dep_cache.rs --- cargo-0.44.1/src/cargo/core/resolver/dep_cache.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/dep_cache.rs 2020-07-17 20:39:39.000000000 +0000 @@ -15,11 +15,11 @@ use log::debug; -use crate::core::interning::InternedString; use crate::core::resolver::context::Context; use crate::core::resolver::errors::describe_path; use crate::core::{Dependency, FeatureValue, PackageId, PackageIdSpec, Registry, Summary}; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::core::resolver::types::{ConflictReason, DepInfo, FeaturesSet}; use crate::core::resolver::{ActivateResult, ResolveOpts}; @@ -402,15 +402,13 @@ // If `package` is indeed an optional dependency then we activate the // feature named `package`, but otherwise if `package` is a required // dependency then there's no feature associated with it. 
- if let Some(dep) = self + if self .summary .dependencies() .iter() - .find(|p| p.name_in_toml() == package) + .any(|dep| dep.name_in_toml() == package && dep.is_optional()) { - if dep.is_optional() { - self.used.insert(package); - } + self.used.insert(package); } self.deps .entry(package) diff -Nru cargo-0.44.1/src/cargo/core/resolver/encode.rs cargo-0.47.0/src/cargo/core/resolver/encode.rs --- cargo-0.44.1/src/cargo/core/resolver/encode.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/encode.rs 2020-07-17 20:39:39.000000000 +0000 @@ -98,9 +98,9 @@ use serde::ser; use serde::{Deserialize, Serialize}; -use crate::core::InternedString; use crate::core::{Dependency, Package, PackageId, SourceId, Workspace}; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::{internal, Graph}; use super::{Resolve, ResolveVersion}; @@ -260,7 +260,7 @@ let mut g = Graph::new(); for &(ref id, _) in live_pkgs.values() { - g.add(id.clone()); + g.add(*id); } for &(ref id, pkg) in live_pkgs.values() { @@ -271,7 +271,7 @@ for edge in deps.iter() { if let Some(to_depend_on) = lookup_id(edge) { - g.link(id.clone(), to_depend_on); + g.link(*id, to_depend_on); } } } @@ -282,7 +282,7 @@ if let Some(ref replace) = pkg.replace { assert!(pkg.dependencies.is_none()); if let Some(replace_id) = lookup_id(replace) { - replacements.insert(id.clone(), replace_id); + replacements.insert(*id, replace_id); } } } diff -Nru cargo-0.44.1/src/cargo/core/resolver/errors.rs cargo-0.47.0/src/cargo/core/resolver/errors.rs --- cargo-0.44.1/src/cargo/core/resolver/errors.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/errors.rs 2020-07-17 20:39:39.000000000 +0000 @@ -219,8 +219,8 @@ }; let mut msg = format!( - "failed to select a version for the requirement `{} = \"{}\"`\n \ - candidate versions found which didn't match: {}\n \ + "failed to select a version for the requirement `{} = \"{}\"`\n\ + candidate versions found which didn't match: {}\n\ location searched: {}\n", dep.package_name(), dep.version_req(), diff -Nru cargo-0.44.1/src/cargo/core/resolver/features.rs cargo-0.47.0/src/cargo/core/resolver/features.rs --- cargo-0.44.1/src/cargo/core/resolver/features.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -41,16 +41,17 @@ use crate::core::compiler::{CompileKind, RustcTargetData}; use crate::core::dependency::{DepKind, Dependency}; use crate::core::resolver::types::FeaturesSet; -use crate::core::resolver::Resolve; -use crate::core::{FeatureValue, InternedString, PackageId, PackageIdSpec, Workspace}; -use crate::util::{CargoResult, Config}; +use crate::core::resolver::{Resolve, ResolveBehavior}; +use crate::core::{FeatureValue, PackageId, PackageIdSpec, PackageSet, Workspace}; +use crate::util::interning::InternedString; +use crate::util::CargoResult; use std::collections::{BTreeSet, HashMap, HashSet}; use std::rc::Rc; /// Map of activated features. /// /// The key is `(PackageId, bool)` where the bool is `true` if these -/// are features for a build dependency. +/// are features for a build dependency or proc-macro. type ActivateMap = HashMap<(PackageId, bool), BTreeSet>; /// Set of all activated features for all packages in the resolve graph. @@ -68,8 +69,8 @@ package_features: bool, /// -Zfeatures is enabled, use new resolver. new_resolver: bool, - /// Build deps will not share share features with other dep kinds. 
- decouple_build_deps: bool, + /// Build deps and proc-macros will not share share features with other dep kinds. + decouple_host_deps: bool, /// Dev dep features will not be activated unless needed. decouple_dev_deps: bool, /// Targets that are not in use will not activate features. @@ -85,38 +86,60 @@ /// dependencies are computed, and can result in longer build times with /// `cargo test` because the lib may need to be built 3 times instead of /// twice. +#[derive(Copy, Clone, PartialEq)] pub enum HasDevUnits { Yes, No, } +/// Flag to indicate that target-specific filtering should be disabled. +#[derive(Copy, Clone, PartialEq)] +pub enum ForceAllTargets { + Yes, + No, +} + /// Flag to indicate if features are requested for a build dependency or not. -#[derive(PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum FeaturesFor { NormalOrDev, - BuildDep, + /// Build dependency or proc-macro. + HostDep, +} + +impl FeaturesFor { + pub fn from_for_host(for_host: bool) -> FeaturesFor { + if for_host { + FeaturesFor::HostDep + } else { + FeaturesFor::NormalOrDev + } + } } impl FeatureOpts { - fn new(config: &Config, has_dev_units: HasDevUnits) -> CargoResult { + fn new( + ws: &Workspace<'_>, + has_dev_units: HasDevUnits, + force_all_targets: ForceAllTargets, + ) -> CargoResult { let mut opts = FeatureOpts::default(); - let unstable_flags = config.cli_unstable(); + let unstable_flags = ws.config().cli_unstable(); opts.package_features = unstable_flags.package_features; let mut enable = |feat_opts: &Vec| { opts.new_resolver = true; for opt in feat_opts { match opt.as_ref() { - "build_dep" => opts.decouple_build_deps = true, + "build_dep" | "host_dep" => opts.decouple_host_deps = true, "dev_dep" => opts.decouple_dev_deps = true, "itarget" => opts.ignore_inactive_targets = true, "all" => { - opts.decouple_build_deps = true; + opts.decouple_host_deps = true; opts.decouple_dev_deps = true; opts.ignore_inactive_targets = true; } "compare" => opts.compare = true, "ws" => unimplemented!(), - "host" => unimplemented!(), s => anyhow::bail!("-Zfeatures flag `{}` is not supported", s), } } @@ -125,6 +148,12 @@ if let Some(feat_opts) = unstable_flags.features.as_ref() { enable(feat_opts)?; } + match ws.resolve_behavior() { + ResolveBehavior::V1 => {} + ResolveBehavior::V2 => { + enable(&vec!["all".to_string()]).unwrap(); + } + } // This env var is intended for testing only. if let Ok(env_opts) = std::env::var("__CARGO_FORCE_NEW_FEATURES") { if env_opts == "1" { @@ -135,8 +164,12 @@ } } if let HasDevUnits::Yes = has_dev_units { + // Dev deps cannot be decoupled when they are in use. opts.decouple_dev_deps = false; } + if let ForceAllTargets::Yes = force_all_targets { + opts.ignore_inactive_targets = false; + } Ok(opts) } } @@ -190,36 +223,34 @@ pkg_id: PackageId, features_for: FeaturesFor, ) -> Vec { - self.activated_features_int(pkg_id, features_for, true) + self.activated_features_int(pkg_id, features_for) + .expect("activated_features for invalid package") } - /// Variant of `activated_features` that returns an empty Vec if this is - /// not a valid pkg_id/is_build combination. Used by `cargo clean` which - /// doesn't know the exact set. + /// Variant of `activated_features` that returns `None` if this is + /// not a valid pkg_id/is_build combination. Used in places which do + /// not know which packages are activated (like `cargo clean`). 
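// A minimal sketch of the accessor pattern used for activated_features just
// above: one fallible core lookup plus two thin wrappers -- a panicking
// accessor for callers that know the entry must exist, and an
// Option-returning variant for callers that cannot know (cargo clean is the
// example given above). The struct, the u32 key, and the String error type
// are illustrative stand-ins.
use std::collections::HashMap;

struct Features {
    map: HashMap<(u32, bool), Vec<String>>,
}

impl Features {
    fn get(&self, id: u32, for_host: bool) -> Vec<String> {
        self.get_int(id, for_host)
            .expect("features requested for an entry that was never activated")
    }

    fn get_unverified(&self, id: u32, for_host: bool) -> Option<Vec<String>> {
        self.get_int(id, for_host).ok()
    }

    // The single fallible implementation both wrappers delegate to.
    fn get_int(&self, id: u32, for_host: bool) -> Result<Vec<String>, String> {
        self.map
            .get(&(id, for_host))
            .cloned()
            .ok_or_else(|| format!("no feature entry for {:?}", (id, for_host)))
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert((1, false), vec!["default".to_string()]);
    let features = Features { map };

    assert_eq!(features.get(1, false), vec!["default".to_string()]);
    assert_eq!(features.get_unverified(2, true), None);
}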
pub fn activated_features_unverified( &self, pkg_id: PackageId, features_for: FeaturesFor, - ) -> Vec { - self.activated_features_int(pkg_id, features_for, false) + ) -> Option> { + self.activated_features_int(pkg_id, features_for).ok() } fn activated_features_int( &self, pkg_id: PackageId, features_for: FeaturesFor, - verify: bool, - ) -> Vec { + ) -> CargoResult> { if let Some(legacy) = &self.legacy { - legacy.get(&pkg_id).map_or_else(Vec::new, |v| v.clone()) + Ok(legacy.get(&pkg_id).map_or_else(Vec::new, |v| v.clone())) } else { - let is_build = self.opts.decouple_build_deps && features_for == FeaturesFor::BuildDep; + let is_build = self.opts.decouple_host_deps && features_for == FeaturesFor::HostDep; if let Some(fs) = self.activated_features.get(&(pkg_id, is_build)) { - fs.iter().cloned().collect() - } else if verify { - panic!("features did not find {:?} {:?}", pkg_id, is_build) + Ok(fs.iter().cloned().collect()) } else { - Vec::new() + anyhow::bail!("features did not find {:?} {:?}", pkg_id, is_build) } } } @@ -228,9 +259,10 @@ pub struct FeatureResolver<'a, 'cfg> { ws: &'a Workspace<'cfg>, target_data: &'a RustcTargetData, - /// The platform to build for, requested by the user. - requested_target: CompileKind, + /// The platforms to build for, requested by the user. + requested_targets: &'a [CompileKind], resolve: &'a Resolve, + package_set: &'a PackageSet<'cfg>, /// Options that change how the feature resolver operates. opts: FeatureOpts, /// Map of features activated for each package. @@ -247,15 +279,17 @@ ws: &Workspace<'cfg>, target_data: &RustcTargetData, resolve: &Resolve, + package_set: &'a PackageSet<'cfg>, requested_features: &RequestedFeatures, specs: &[PackageIdSpec], - requested_target: CompileKind, + requested_targets: &[CompileKind], has_dev_units: HasDevUnits, + force_all_targets: ForceAllTargets, ) -> CargoResult { use crate::util::profile; let _p = profile::start("resolve features"); - let opts = FeatureOpts::new(ws.config(), has_dev_units)?; + let opts = FeatureOpts::new(ws, has_dev_units, force_all_targets)?; if !opts.new_resolver { // Legacy mode. return Ok(ResolvedFeatures { @@ -267,8 +301,9 @@ let mut r = FeatureResolver { ws, target_data, - requested_target, + requested_targets, resolve, + package_set, opts, activated_features: HashMap::new(), processed_deps: HashSet::new(), @@ -294,7 +329,18 @@ let member_features = self.ws.members_with_features(specs, requested_features)?; for (member, requested_features) in &member_features { let fvs = self.fvs_from_requested(member.package_id(), requested_features); - self.activate_pkg(member.package_id(), &fvs, false)?; + let for_host = self.is_proc_macro(member.package_id()); + self.activate_pkg(member.package_id(), &fvs, for_host)?; + if for_host { + // Also activate without for_host. This is needed if the + // proc-macro includes other targets (like binaries or tests), + // or running in `cargo test`. Note that in a workspace, if + // the proc-macro is selected on the command like (like with + // `--workspace`), this forces feature unification with normal + // dependencies. This is part of the bigger problem where + // features depend on which packages are built. + self.activate_pkg(member.package_id(), &fvs, false)?; + } } Ok(()) } @@ -303,18 +349,18 @@ &mut self, pkg_id: PackageId, fvs: &[FeatureValue], - for_build: bool, + for_host: bool, ) -> CargoResult<()> { // Add an empty entry to ensure everything is covered. This is intended for // finding bugs where the resolver missed something it should have visited. 
// Remove this in the future if `activated_features` uses an empty default. self.activated_features - .entry((pkg_id, for_build)) + .entry((pkg_id, self.opts.decouple_host_deps && for_host)) .or_insert_with(BTreeSet::new); for fv in fvs { - self.activate_fv(pkg_id, fv, for_build)?; + self.activate_fv(pkg_id, fv, for_host)?; } - if !self.processed_deps.insert((pkg_id, for_build)) { + if !self.processed_deps.insert((pkg_id, for_host)) { // Already processed dependencies. There's no need to process them // again. This is primarily to avoid cycles, but also helps speed // things up. @@ -330,8 +376,8 @@ // features that enable other features. return Ok(()); } - for (dep_pkg_id, deps) in self.deps(pkg_id, for_build) { - for (dep, dep_for_build) in deps { + for (dep_pkg_id, deps) in self.deps(pkg_id, for_host) { + for (dep, dep_for_host) in deps { if dep.is_optional() { // Optional dependencies are enabled in `activate_fv` when // a feature enables it. @@ -339,7 +385,7 @@ } // Recurse into the dependency. let fvs = self.fvs_from_dependency(dep_pkg_id, dep); - self.activate_pkg(dep_pkg_id, &fvs, dep_for_build)?; + self.activate_pkg(dep_pkg_id, &fvs, dep_for_host)?; } } Ok(()) @@ -350,42 +396,42 @@ &mut self, pkg_id: PackageId, fv: &FeatureValue, - for_build: bool, + for_host: bool, ) -> CargoResult<()> { match fv { FeatureValue::Feature(f) => { - self.activate_rec(pkg_id, *f, for_build)?; + self.activate_rec(pkg_id, *f, for_host)?; } FeatureValue::Crate(dep_name) => { // Activate the feature name on self. - self.activate_rec(pkg_id, *dep_name, for_build)?; + self.activate_rec(pkg_id, *dep_name, for_host)?; // Activate the optional dep. - for (dep_pkg_id, deps) in self.deps(pkg_id, for_build) { - for (dep, dep_for_build) in deps { + for (dep_pkg_id, deps) in self.deps(pkg_id, for_host) { + for (dep, dep_for_host) in deps { if dep.name_in_toml() != *dep_name { continue; } let fvs = self.fvs_from_dependency(dep_pkg_id, dep); - self.activate_pkg(dep_pkg_id, &fvs, dep_for_build)?; + self.activate_pkg(dep_pkg_id, &fvs, dep_for_host)?; } } } FeatureValue::CrateFeature(dep_name, dep_feature) => { // Activate a feature within a dependency. - for (dep_pkg_id, deps) in self.deps(pkg_id, for_build) { - for (dep, dep_for_build) in deps { + for (dep_pkg_id, deps) in self.deps(pkg_id, for_host) { + for (dep, dep_for_host) in deps { if dep.name_in_toml() != *dep_name { continue; } if dep.is_optional() { // Activate the crate on self. let fv = FeatureValue::Crate(*dep_name); - self.activate_fv(pkg_id, &fv, for_build)?; + self.activate_fv(pkg_id, &fv, for_host)?; } // Activate the feature on the dependency. let summary = self.resolve.summary(dep_pkg_id); let fv = FeatureValue::new(*dep_feature, summary); - self.activate_fv(dep_pkg_id, &fv, dep_for_build)?; + self.activate_fv(dep_pkg_id, &fv, dep_for_host)?; } } } @@ -399,11 +445,11 @@ &mut self, pkg_id: PackageId, feature_to_enable: InternedString, - for_build: bool, + for_host: bool, ) -> CargoResult<()> { let enabled = self .activated_features - .entry((pkg_id, for_build)) + .entry((pkg_id, self.opts.decouple_host_deps && for_host)) .or_insert_with(BTreeSet::new); if !enabled.insert(feature_to_enable) { // Already enabled. @@ -426,7 +472,7 @@ } }; for fv in fvs { - self.activate_fv(pkg_id, fv, for_build)?; + self.activate_fv(pkg_id, fv, for_host)?; } Ok(()) } @@ -462,9 +508,9 @@ .collect(); // Add optional deps. // Top-level requested features can never apply to - // build-dependencies, so for_build is `false` here. 
+ // build-dependencies, so for_host is `false` here. for (_dep_pkg_id, deps) in self.deps(pkg_id, false) { - for (dep, _dep_for_build) in deps { + for (dep, _dep_for_host) in deps { if dep.is_optional() { // This may result in duplicates, but that should be ok. fvs.push(FeatureValue::Crate(dep.name_in_toml())); @@ -491,25 +537,27 @@ fn deps( &self, pkg_id: PackageId, - for_build: bool, + for_host: bool, ) -> Vec<(PackageId, Vec<(&'a Dependency, bool)>)> { // Helper for determining if a platform is activated. let platform_activated = |dep: &Dependency| -> bool { // We always care about build-dependencies, and they are always // Host. If we are computing dependencies "for a build script", // even normal dependencies are host-only. - if for_build || dep.is_build() { + if for_host || dep.is_build() { return self .target_data .dep_platform_activated(dep, CompileKind::Host); } // Not a build dependency, and not for a build script, so must be Target. - self.target_data - .dep_platform_activated(dep, self.requested_target) + self.requested_targets + .iter() + .any(|kind| self.target_data.dep_platform_activated(dep, *kind)) }; self.resolve .deps(pkg_id) .map(|(dep_id, deps)| { + let is_proc_macro = self.is_proc_macro(dep_id); let deps = deps .iter() .filter(|dep| { @@ -525,9 +573,8 @@ true }) .map(|dep| { - let dep_for_build = - for_build || (self.opts.decouple_build_deps && dep.is_build()); - (dep, dep_for_build) + let dep_for_host = for_host || dep.is_build() || is_proc_macro; + (dep, dep_for_host) }) .collect::>(); (dep_id, deps) @@ -542,9 +589,13 @@ for ((pkg_id, dep_kind), features) in &self.activated_features { let r_features = self.resolve.features(*pkg_id); if !r_features.iter().eq(features.iter()) { - eprintln!( + crate::drop_eprintln!( + self.ws.config(), "{}/{:?} features mismatch\nresolve: {:?}\nnew: {:?}\n", - pkg_id, dep_kind, r_features, features + pkg_id, + dep_kind, + r_features, + features ); found = true; } @@ -553,4 +604,11 @@ panic!("feature mismatch"); } } + + fn is_proc_macro(&self, package_id: PackageId) -> bool { + self.package_set + .get_one(package_id) + .expect("packages downloaded") + .proc_macro() + } } diff -Nru cargo-0.44.1/src/cargo/core/resolver/mod.rs cargo-0.47.0/src/cargo/core/resolver/mod.rs --- cargo-0.44.1/src/cargo/core/resolver/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -69,9 +69,9 @@ pub use self::encode::Metadata; pub use self::encode::{EncodableDependency, EncodablePackageId, EncodableResolve}; pub use self::errors::{ActivateError, ActivateResult, ResolveError}; -pub use self::features::HasDevUnits; +pub use self::features::{ForceAllTargets, HasDevUnits}; pub use self::resolve::{Resolve, ResolveVersion}; -pub use self::types::ResolveOpts; +pub use self::types::{ResolveBehavior, ResolveOpts}; mod conflict_cache; mod context; @@ -609,12 +609,11 @@ cx.age += 1; if let Some((parent, dep)) = parent { let parent_pid = parent.package_id(); - Rc::make_mut( - // add a edge from candidate to parent in the parents graph - cx.parents.link(candidate_pid, parent_pid), - ) - // and associate dep with that edge - .push(dep.clone()); + // add a edge from candidate to parent in the parents graph + cx.parents + .link(candidate_pid, parent_pid) + // and associate dep with that edge + .insert(dep.clone()); if let Some(public_dependency) = cx.public_dependency.as_mut() { public_dependency.add_edge( candidate_pid, @@ -845,7 +844,7 @@ for (critical_parent, critical_parents_deps) in 
cx.parents.edges(&backtrack_critical_id).filter(|(p, _)| { // it will only help backjump further if it is older then the critical_age - cx.is_active(*p).expect("parent not currently active!?") < backtrack_critical_age + cx.is_active(**p).expect("parent not currently active!?") < backtrack_critical_age }) { for critical_parents_dep in critical_parents_deps.iter() { diff -Nru cargo-0.44.1/src/cargo/core/resolver/resolve.rs cargo-0.47.0/src/cargo/core/resolver/resolve.rs --- cargo-0.44.1/src/cargo/core/resolver/resolve.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/resolver/resolve.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,8 +1,8 @@ use super::encode::Metadata; use crate::core::dependency::DepKind; -use crate::core::interning::InternedString; use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target}; use crate::util::errors::CargoResult; +use crate::util::interning::InternedString; use crate::util::Graph; use std::borrow::Borrow; use std::cmp; @@ -16,14 +16,14 @@ /// for each package. pub struct Resolve { /// A graph, whose vertices are packages and edges are dependency specifications - /// from `Cargo.toml`. We need a `Vec` here because the same package + /// from `Cargo.toml`. We need a `HashSet` here because the same package /// might be present in both `[dependencies]` and `[build-dependencies]`. - graph: Graph>, + graph: Graph>, /// Replacements from the `[replace]` table. replacements: HashMap, /// Inverted version of `replacements`. reverse_replacements: HashMap, - /// An empty `HashSet` to avoid creating a new `HashSet` for every package + /// An empty `Vec` to avoid creating a new `Vec` for every package /// that does not have any features, and to avoid using `Option` to /// simplify the API. empty_features: Vec, @@ -70,7 +70,7 @@ impl Resolve { pub fn new( - graph: Graph>, + graph: Graph>, replacements: HashMap, features: HashMap>, checksums: HashMap>, @@ -119,10 +119,9 @@ pub fn register_used_patches(&mut self, patches: &[Summary]) { for summary in patches { - if self.iter().any(|id| id == summary.package_id()) { - continue; - } - self.unused_patches.push(summary.package_id()); + if !self.graph.contains(&summary.package_id()) { + self.unused_patches.push(summary.package_id()) + }; } } @@ -264,7 +263,7 @@ self.graph.iter().cloned() } - pub fn deps(&self, pkg: PackageId) -> impl Iterator { + pub fn deps(&self, pkg: PackageId) -> impl Iterator)> { self.deps_not_replaced(pkg) .map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps)) } @@ -272,10 +271,8 @@ pub fn deps_not_replaced( &self, pkg: PackageId, - ) -> impl Iterator { - self.graph - .edges(&pkg) - .map(|(id, deps)| (*id, deps.as_slice())) + ) -> impl Iterator)> { + self.graph.edges(&pkg).map(|(id, deps)| (*id, deps)) } pub fn replacement(&self, pkg: PackageId) -> Option { @@ -307,6 +304,10 @@ PackageIdSpec::query_str(spec, self.iter()) } + pub fn specs_to_ids(&self, specs: &[PackageIdSpec]) -> CargoResult> { + specs.iter().map(|s| s.query(self.iter())).collect() + } + pub fn unused_patches(&self) -> &[PackageId] { &self.unused_patches } @@ -325,8 +326,9 @@ to: PackageId, to_target: &Target, ) -> CargoResult { + let empty_set: HashSet = HashSet::new(); let deps = if from == to { - &[] + &empty_set } else { self.dependencies_listed(from, to) }; @@ -349,7 +351,7 @@ Ok(name) } - fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &[Dependency] { + fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &HashSet { // We've got a dependency on `from` to `to`, 
but this dependency edge
         // may be affected by [replace]. If the `to` package is listed as the
         // target of a replacement (aka the key of a reverse replacement map)
diff -Nru cargo-0.44.1/src/cargo/core/resolver/types.rs cargo-0.47.0/src/cargo/core/resolver/types.rs
--- cargo-0.44.1/src/cargo/core/resolver/types.rs 2020-05-04 02:09:09.000000000 +0000
+++ cargo-0.47.0/src/cargo/core/resolver/types.rs 2020-07-17 20:39:39.000000000 +0000
@@ -1,7 +1,7 @@
 use super::features::RequestedFeatures;
-use crate::core::interning::InternedString;
 use crate::core::{Dependency, PackageId, Summary};
 use crate::util::errors::CargoResult;
+use crate::util::interning::InternedString;
 use crate::util::Config;
 use std::cmp::Ordering;
 use std::collections::{BTreeMap, BTreeSet};
@@ -97,6 +97,35 @@
 /// optimized comparison operators like `is_subset` at the interfaces.
 pub type FeaturesSet = Rc<BTreeSet<InternedString>>;
 
+/// Resolver behavior, used to opt-in to new behavior that is
+/// backwards-incompatible via the `resolver` field in the manifest.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+pub enum ResolveBehavior {
+    /// V1 is the original resolver behavior.
+    V1,
+    /// V2 adds the new feature resolver.
+    V2,
+}
+
+impl ResolveBehavior {
+    pub fn from_manifest(resolver: &str) -> CargoResult<ResolveBehavior> {
+        match resolver {
+            "2" => Ok(ResolveBehavior::V2),
+            s => anyhow::bail!(
+                "`resolver` setting `{}` is not valid, only valid option is \"2\"",
+                s
+            ),
+        }
+    }
+
+    pub fn to_manifest(&self) -> Option<String> {
+        match self {
+            ResolveBehavior::V1 => None,
+            ResolveBehavior::V2 => Some("2".to_string()),
+        }
+    }
+}
+
 /// Options for how the resolve should work.
 #[derive(Clone, Debug, Eq, PartialEq, Hash)]
 pub struct ResolveOpts {
diff -Nru cargo-0.44.1/src/cargo/core/shell.rs cargo-0.47.0/src/cargo/core/shell.rs
--- cargo-0.44.1/src/cargo/core/shell.rs 2020-05-04 02:09:09.000000000 +0000
+++ cargo-0.47.0/src/cargo/core/shell.rs 2020-07-17 20:39:39.000000000 +0000
@@ -14,13 +14,13 @@
     Quiet,
 }
 
-/// An abstraction around a `Write`able object that remembers preferences for output verbosity and
-/// color.
+/// An abstraction around console output that remembers preferences for output
+/// verbosity and color.
 pub struct Shell {
-    /// the `Write`able object, either with or without color support (represented by different enum
-    /// variants)
-    err: ShellOut,
-    /// How verbose messages should be
+    /// Wrapper around stdout/stderr. This helps with supporting sending
+    /// output to a memory buffer which is useful for tests.
+    output: ShellOut,
+    /// How verbose messages should be.
     verbosity: Verbosity,
     /// Flag that indicates the current line needs to be cleared before
     /// printing. Used when a progress bar is currently displayed.
@@ -29,7 +29,7 @@
 
 impl fmt::Debug for Shell {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self.err {
+        match self.output {
             ShellOut::Write(_) => f
                 .debug_struct("Shell")
                 .field("verbosity", &self.verbosity)
@@ -49,8 +49,9 @@
     Write(Box<dyn Write>),
     /// Color-enabled stdio, with information on whether color should be used
     Stream {
-        stream: StandardStream,
-        tty: bool,
+        stdout: StandardStream,
+        stderr: StandardStream,
+        stderr_tty: bool,
         color_choice: ColorChoice,
     },
 }
@@ -70,11 +71,13 @@
     /// Creates a new shell (color choice and verbosity), defaulting to 'auto' color and verbose
     /// output.
pub fn new() -> Shell { + let auto = ColorChoice::CargoAuto.to_termcolor_color_choice(); Shell { - err: ShellOut::Stream { - stream: StandardStream::stderr(ColorChoice::CargoAuto.to_termcolor_color_choice()), + output: ShellOut::Stream { + stdout: StandardStream::stdout(auto), + stderr: StandardStream::stderr(auto), color_choice: ColorChoice::CargoAuto, - tty: atty::is(atty::Stream::Stderr), + stderr_tty: atty::is(atty::Stream::Stderr), }, verbosity: Verbosity::Verbose, needs_clear: false, @@ -84,7 +87,7 @@ /// Creates a shell from a plain writable object, with no color, and max verbosity. pub fn from_write(out: Box) -> Shell { Shell { - err: ShellOut::Write(out), + output: ShellOut::Write(out), verbosity: Verbosity::Verbose, needs_clear: false, } @@ -105,18 +108,12 @@ if self.needs_clear { self.err_erase_line(); } - self.err.print(status, message, color, justified) + self.output + .message_stderr(status, message, color, justified) } } } - pub fn stdout_println(&mut self, message: impl fmt::Display) { - if self.needs_clear { - self.err_erase_line(); - } - println!("{}", message); - } - /// Sets whether the next print should clear the current line. pub fn set_needs_clear(&mut self, needs_clear: bool) { self.needs_clear = needs_clear; @@ -129,31 +126,44 @@ /// Returns the width of the terminal in spaces, if any. pub fn err_width(&self) -> Option { - match self.err { - ShellOut::Stream { tty: true, .. } => imp::stderr_width(), + match self.output { + ShellOut::Stream { + stderr_tty: true, .. + } => imp::stderr_width(), _ => None, } } /// Returns `true` if stderr is a tty. pub fn is_err_tty(&self) -> bool { - match self.err { - ShellOut::Stream { tty, .. } => tty, + match self.output { + ShellOut::Stream { stderr_tty, .. } => stderr_tty, _ => false, } } - /// Gets a reference to the underlying writer. + /// Gets a reference to the underlying stdout writer. + pub fn out(&mut self) -> &mut dyn Write { + if self.needs_clear { + self.err_erase_line(); + } + self.output.stdout() + } + + /// Gets a reference to the underlying stderr writer. pub fn err(&mut self) -> &mut dyn Write { if self.needs_clear { self.err_erase_line(); } - self.err.as_write() + self.output.stderr() } /// Erase from cursor to end of line. pub fn err_erase_line(&mut self) { - if let ShellOut::Stream { tty: true, .. } = self.err { + if let ShellOut::Stream { + stderr_tty: true, .. + } = self.output + { imp::err_erase_line(self); self.needs_clear = false; } @@ -216,7 +226,8 @@ if self.needs_clear { self.err_erase_line(); } - self.err.print(&"error", Some(&message), Red, false) + self.output + .message_stderr(&"error", Some(&message), Red, false) } /// Prints an amber 'warning' message. @@ -245,10 +256,11 @@ /// Updates the color choice (always, never, or auto) from a string.. pub fn set_color_choice(&mut self, color: Option<&str>) -> CargoResult<()> { if let ShellOut::Stream { - ref mut stream, + ref mut stdout, + ref mut stderr, ref mut color_choice, .. - } = self.err + } = self.output { let cfg = match color { Some("always") => ColorChoice::Always, @@ -263,7 +275,9 @@ ), }; *color_choice = cfg; - *stream = StandardStream::stderr(cfg.to_termcolor_color_choice()); + let choice = cfg.to_termcolor_color_choice(); + *stdout = StandardStream::stdout(choice); + *stderr = StandardStream::stderr(choice); } Ok(()) } @@ -273,17 +287,17 @@ /// If we are not using a color stream, this will always return `Never`, even if the color /// choice has been set to something else. 
pub fn color_choice(&self) -> ColorChoice { - match self.err { + match self.output { ShellOut::Stream { color_choice, .. } => color_choice, ShellOut::Write(_) => ColorChoice::Never, } } /// Whether the shell supports color. - pub fn supports_color(&self) -> bool { - match &self.err { + pub fn err_supports_color(&self) -> bool { + match &self.output { ShellOut::Write(_) => false, - ShellOut::Stream { stream, .. } => stream.supports_color(), + ShellOut::Stream { stderr, .. } => stderr.supports_color(), } } @@ -294,14 +308,19 @@ } #[cfg(windows)] { - if let ShellOut::Stream { stream, .. } = &mut self.err { - ::fwdansi::write_ansi(stream, message)?; + if let ShellOut::Stream { stderr, .. } = &mut self.output { + ::fwdansi::write_ansi(stderr, message)?; return Ok(()); } } self.err().write_all(message)?; Ok(()) } + + pub fn print_json(&mut self, obj: &T) { + let encoded = serde_json::to_string(&obj).unwrap(); + drop(writeln!(self.out(), "{}", encoded)); + } } impl Default for Shell { @@ -314,7 +333,7 @@ /// Prints out a message with a status. The status comes first, and is bold plus the given /// color. The status can be justified, in which case the max width that will right align is /// 12 chars. - fn print( + fn message_stderr( &mut self, status: &dyn fmt::Display, message: Option<&dyn fmt::Display>, @@ -322,20 +341,20 @@ justified: bool, ) -> CargoResult<()> { match *self { - ShellOut::Stream { ref mut stream, .. } => { - stream.reset()?; - stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(color)))?; + ShellOut::Stream { ref mut stderr, .. } => { + stderr.reset()?; + stderr.set_color(ColorSpec::new().set_bold(true).set_fg(Some(color)))?; if justified { - write!(stream, "{:>12}", status)?; + write!(stderr, "{:>12}", status)?; } else { - write!(stream, "{}", status)?; - stream.set_color(ColorSpec::new().set_bold(true))?; - write!(stream, ":")?; + write!(stderr, "{}", status)?; + stderr.set_color(ColorSpec::new().set_bold(true))?; + write!(stderr, ":")?; } - stream.reset()?; + stderr.reset()?; match message { - Some(message) => writeln!(stream, " {}", message)?, - None => write!(stream, " ")?, + Some(message) => writeln!(stderr, " {}", message)?, + None => write!(stderr, " ")?, } } ShellOut::Write(ref mut w) => { @@ -353,10 +372,18 @@ Ok(()) } - /// Gets this object as a `io::Write`. - fn as_write(&mut self) -> &mut dyn Write { + /// Gets stdout as a `io::Write`. + fn stdout(&mut self) -> &mut dyn Write { match *self { - ShellOut::Stream { ref mut stream, .. } => stream, + ShellOut::Stream { ref mut stdout, .. } => stdout, + ShellOut::Write(ref mut w) => w, + } + } + + /// Gets stderr as a `io::Write`. + fn stderr(&mut self) -> &mut dyn Write { + match *self { + ShellOut::Stream { ref mut stderr, .. } => stderr, ShellOut::Write(ref mut w) => w, } } @@ -379,7 +406,7 @@ } } -#[cfg(any(target_os = "linux", target_os = "macos", target_os = "freebsd"))] +#[cfg(unix)] mod imp { use super::Shell; use std::mem; @@ -404,19 +431,7 @@ // This is the "EL - Erase in Line" sequence. It clears from the cursor // to the end of line. 
// https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences - let _ = shell.err.as_write().write_all(b"\x1B[K"); - } -} - -#[cfg(all( - unix, - not(any(target_os = "linux", target_os = "macos", target_os = "freebsd")) -))] -mod imp { - pub(super) use super::default_err_erase_line as err_erase_line; - - pub fn stderr_width() -> Option { - None + let _ = shell.output.stderr().write_all(b"\x1B[K"); } } @@ -476,16 +491,10 @@ } } -#[cfg(any( - all( - unix, - not(any(target_os = "linux", target_os = "macos", target_os = "freebsd")) - ), - windows, -))] +#[cfg(windows)] fn default_err_erase_line(shell: &mut Shell) { if let Some(max_width) = imp::stderr_width() { let blank = " ".repeat(max_width); - drop(write!(shell.err.as_write(), "{}\r", blank)); + drop(write!(shell.output.stderr(), "{}\r", blank)); } } diff -Nru cargo-0.44.1/src/cargo/core/source/mod.rs cargo-0.47.0/src/cargo/core/source/mod.rs --- cargo-0.44.1/src/cargo/core/source/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/source/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -56,7 +56,8 @@ let mut sources = SourceMap::new(); sources.insert(self); let pkg_set = PackageSet::new(&[package], sources, config)?; - Ok(pkg_set.get_one(package)?.clone()) + let pkg = pkg_set.get_one(package)?; + Ok(Package::clone(pkg)) } fn finish_download(&mut self, package: PackageId, contents: Vec) -> CargoResult; diff -Nru cargo-0.44.1/src/cargo/core/source/source_id.rs cargo-0.47.0/src/cargo/core/source/source_id.rs --- cargo-0.44.1/src/cargo/core/source/source_id.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/source/source_id.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,8 +4,6 @@ use std::hash::{self, Hash}; use std::path::Path; use std::ptr; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering::SeqCst; use std::sync::Mutex; use log::trace; @@ -14,7 +12,6 @@ use url::Url; use crate::core::PackageId; -use crate::ops; use crate::sources::DirectorySource; use crate::sources::{GitSource, PathSource, RegistrySource, CRATES_IO_INDEX}; use crate::util::{CanonicalUrl, CargoResult, Config, IntoUrl}; @@ -189,22 +186,8 @@ /// a `.cargo/config`. pub fn crates_io(config: &Config) -> CargoResult { config.crates_io_source_id(|| { - let cfg = ops::registry_configuration(config, None)?; - let url = if let Some(ref index) = cfg.index { - static WARNED: AtomicBool = AtomicBool::new(false); - if !WARNED.swap(true, SeqCst) { - config.shell().warn( - "custom registry support via \ - the `registry.index` configuration is \ - being removed, this functionality \ - will not work in the future", - )?; - } - &index[..] - } else { - CRATES_IO_INDEX - }; - let url = url.into_url()?; + config.check_registry_index_not_set()?; + let url = CRATES_IO_INDEX.into_url().unwrap(); SourceId::for_registry(&url) }) } @@ -326,7 +309,7 @@ /// Gets the value of the precise field. pub fn precise(self) -> Option<&'static str> { - self.inner.precise.as_ref().map(|s| &s[..]) + self.inner.precise.as_deref() } /// Gets the Git reference if this is a git source, otherwise `None`. 
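The Shell rework above routes human-readable status messages to stderr while reserving stdout for machine-readable output (as in the new `print_json`). Below is a minimal standalone sketch of that split, assuming only the `termcolor` crate (which this code already uses); it is an illustration of the pattern, not cargo's actual `Shell` API, and the package name and JSON payload are made up.

use std::io::Write;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

// Status messages are colored and written to stderr, keeping stdout clean
// for machine-readable output such as JSON lines.
fn print_status(stderr: &mut StandardStream, status: &str, message: &str) -> std::io::Result<()> {
    stderr.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Green)))?;
    write!(stderr, "{:>12}", status)?;
    stderr.reset()?;
    writeln!(stderr, " {}", message)
}

fn main() -> std::io::Result<()> {
    let mut stderr = StandardStream::stderr(ColorChoice::Auto);
    let mut stdout = StandardStream::stdout(ColorChoice::Auto);
    print_status(&mut stderr, "Compiling", "foo v0.1.0")?;
    // Machine-readable output (the `print_json` case) goes to stdout, so it
    // can be piped without interleaving with progress/status text.
    writeln!(stdout, "{}", r#"{"example": true}"#)
}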
diff -Nru cargo-0.44.1/src/cargo/core/summary.rs cargo-0.47.0/src/cargo/core/summary.rs --- cargo-0.44.1/src/cargo/core/summary.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/summary.rs 2020-07-17 20:39:39.000000000 +0000 @@ -7,8 +7,8 @@ use serde::{Serialize, Serializer}; -use crate::core::interning::InternedString; use crate::core::{Dependency, PackageId, SourceId}; +use crate::util::interning::InternedString; use semver::Version; use crate::util::CargoResult; @@ -91,7 +91,7 @@ &self.inner.features } pub fn checksum(&self) -> Option<&str> { - self.inner.checksum.as_ref().map(|s| &s[..]) + self.inner.checksum.as_deref() } pub fn links(&self) -> Option { self.inner.links @@ -115,8 +115,7 @@ { { let slot = &mut Rc::make_mut(&mut self.inner).dependencies; - let deps = mem::replace(slot, Vec::new()); - *slot = deps.into_iter().map(f).collect(); + *slot = mem::take(slot).into_iter().map(f).collect(); } self } diff -Nru cargo-0.44.1/src/cargo/core/workspace.rs cargo-0.47.0/src/cargo/core/workspace.rs --- cargo-0.44.1/src/cargo/core/workspace.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/core/workspace.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,8 @@ use std::cell::RefCell; use std::collections::hash_map::{Entry, HashMap}; -use std::collections::{BTreeMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::path::{Path, PathBuf}; +use std::rc::Rc; use std::slice; use glob::glob; @@ -11,11 +12,13 @@ use crate::core::features::Features; use crate::core::registry::PackageRegistry; use crate::core::resolver::features::RequestedFeatures; +use crate::core::resolver::ResolveBehavior; use crate::core::{Dependency, PackageId, PackageIdSpec}; use crate::core::{EitherManifest, Package, SourceId, VirtualManifest}; use crate::ops; use crate::sources::PathSource; use crate::util::errors::{CargoResult, CargoResultExt, ManifestError}; +use crate::util::interning::InternedString; use crate::util::paths; use crate::util::toml::{read_manifest, TomlProfiles}; use crate::util::{Config, Filesystem}; @@ -83,6 +86,12 @@ // If `true`, then the resolver will ignore any existing `Cargo.lock` // file. This is set for `cargo install` without `--locked`. ignore_lock: bool, + + /// The resolver behavior specified with the `resolver` field. + resolve_behavior: Option, + + /// Workspace-level custom metadata + custom_metadata: Option, } // Separate structure for tracking loaded packages (to avoid loading anything @@ -121,6 +130,7 @@ members: Option>, default_members: Option>, exclude: Vec, + custom_metadata: Option, } /// An iterator over the member packages of a workspace, returned by @@ -140,8 +150,24 @@ pub fn new(manifest_path: &Path, config: &'cfg Config) -> CargoResult> { let mut ws = Workspace::new_default(manifest_path.to_path_buf(), config); ws.target_dir = config.target_dir()?; - ws.root_manifest = ws.find_root(manifest_path)?; + + if manifest_path.is_relative() { + anyhow::bail!( + "manifest_path:{:?} is not an absolute path. Please provide an absolute path.", + manifest_path + ) + } else { + ws.root_manifest = ws.find_root(manifest_path)?; + } + + ws.custom_metadata = ws + .load_workspace_config()? 
+ .and_then(|cfg| cfg.custom_metadata); ws.find_members()?; + ws.resolve_behavior = match ws.root_maybe() { + MaybePackage::Package(p) => p.manifest().resolve_behavior(), + MaybePackage::Virtual(vm) => vm.resolve_behavior(), + }; ws.validate()?; Ok(ws) } @@ -163,6 +189,8 @@ require_optional_deps: true, loaded_packages: RefCell::new(HashMap::new()), ignore_lock: false, + resolve_behavior: None, + custom_metadata: None, } } @@ -175,6 +203,7 @@ let mut ws = Workspace::new_default(current_manifest, config); ws.root_manifest = Some(root_path.join("Cargo.toml")); ws.target_dir = config.target_dir()?; + ws.resolve_behavior = manifest.resolve_behavior(); ws.packages .packages .insert(root_path, MaybePackage::Virtual(manifest)); @@ -202,6 +231,7 @@ let mut ws = Workspace::new_default(package.manifest_path().to_path_buf(), config); ws.is_ephemeral = true; ws.require_optional_deps = require_optional_deps; + ws.resolve_behavior = package.manifest().resolve_behavior(); let key = ws.current_manifest.parent().unwrap(); let id = package.package_id(); let package = MaybePackage::Package(package); @@ -374,6 +404,30 @@ self } + pub fn custom_metadata(&self) -> Option<&toml::Value> { + self.custom_metadata.as_ref() + } + + pub fn load_workspace_config(&mut self) -> CargoResult> { + // If we didn't find a root, it must mean there is no [workspace] section, and thus no + // metadata. + if let Some(root_path) = &self.root_manifest { + let root_package = self.packages.load(root_path)?; + match root_package.workspace_config() { + WorkspaceConfig::Root(ref root_config) => { + return Ok(Some(root_config.clone())); + } + + _ => anyhow::bail!( + "root of a workspace inferred but wasn't a root: {}", + root_path.display() + ), + } + } + + Ok(None) + } + /// Finds the root of a workspace for the crate whose manifest is located /// at `manifest_path`. /// @@ -455,8 +509,8 @@ /// will transitively follow all `path` dependencies looking for members of /// the workspace. fn find_members(&mut self) -> CargoResult<()> { - let root_manifest_path = match self.root_manifest { - Some(ref path) => path.clone(), + let workspace_config = match self.load_workspace_config()? { + Some(workspace_config) => workspace_config, None => { debug!("find_members - only me as a member"); self.members.push(self.current_manifest.clone()); @@ -469,30 +523,20 @@ } }; - let members_paths; - let default_members_paths; - { - let root_package = self.packages.load(&root_manifest_path)?; - match *root_package.workspace_config() { - WorkspaceConfig::Root(ref root_config) => { - members_paths = root_config - .members_paths(root_config.members.as_ref().unwrap_or(&vec![]))?; - default_members_paths = if root_manifest_path == self.current_manifest { - if let Some(ref default) = root_config.default_members { - Some(root_config.members_paths(default)?) - } else { - None - } - } else { - None - }; - } - _ => anyhow::bail!( - "root of a workspace inferred but wasn't a root: {}", - root_manifest_path.display() - ), + // self.root_manifest must be Some to have retrieved workspace_config + let root_manifest_path = self.root_manifest.clone().unwrap(); + + let members_paths = + workspace_config.members_paths(workspace_config.members.as_ref().unwrap_or(&vec![]))?; + let default_members_paths = if root_manifest_path == self.current_manifest { + if let Some(ref default) = workspace_config.default_members { + Some(workspace_config.members_paths(default)?) 
+ } else { + None } - } + } else { + None + }; for path in members_paths { self.find_path_deps(&path.join("Cargo.toml"), &root_manifest_path, false)?; @@ -577,6 +621,18 @@ } } + pub fn resolve_behavior(&self) -> ResolveBehavior { + self.resolve_behavior.unwrap_or(ResolveBehavior::V1) + } + + pub fn allows_unstable_package_features(&self) -> bool { + self.config().cli_unstable().package_features + || match self.resolve_behavior() { + ResolveBehavior::V1 => false, + ResolveBehavior::V2 => true, + } + } + /// Validates a workspace, ensuring that a number of invariants are upheld: /// /// 1. A workspace only has one root. @@ -588,35 +644,47 @@ return Ok(()); } - let mut roots = Vec::new(); - { - let mut names = BTreeMap::new(); - for member in self.members.iter() { - let package = self.packages.get(member); - match *package.workspace_config() { - WorkspaceConfig::Root(_) => { - roots.push(member.parent().unwrap().to_path_buf()); - } - WorkspaceConfig::Member { .. } => {} - } - let name = match *package { - MaybePackage::Package(ref p) => p.name(), - MaybePackage::Virtual(_) => continue, - }; - if let Some(prev) = names.insert(name, member) { - anyhow::bail!( - "two packages named `{}` in this workspace:\n\ + self.validate_unique_names()?; + self.validate_workspace_roots()?; + self.validate_members()?; + self.error_if_manifest_not_in_members()?; + self.validate_manifest() + } + + fn validate_unique_names(&self) -> CargoResult<()> { + let mut names = BTreeMap::new(); + for member in self.members.iter() { + let package = self.packages.get(member); + let name = match *package { + MaybePackage::Package(ref p) => p.name(), + MaybePackage::Virtual(_) => continue, + }; + if let Some(prev) = names.insert(name, member) { + anyhow::bail!( + "two packages named `{}` in this workspace:\n\ - {}\n\ - {}", - name, - prev.display(), - member.display() - ); - } + name, + prev.display(), + member.display() + ); } } + Ok(()) + } + fn validate_workspace_roots(&self) -> CargoResult<()> { + let roots: Vec = self + .members + .iter() + .filter(|&member| { + let config = self.packages.get(member).workspace_config(); + matches!(config, WorkspaceConfig::Root(_)) + }) + .map(|member| member.parent().unwrap().to_path_buf()) + .collect(); match roots.len() { + 1 => Ok(()), 0 => anyhow::bail!( "`package.workspace` configuration points to a crate \ which is not configured with [workspace]: \n\ @@ -625,7 +693,6 @@ self.current_manifest.display(), self.root_manifest.as_ref().unwrap().display() ), - 1 => {} _ => { anyhow::bail!( "multiple workspace roots found in the same workspace:\n{}", @@ -637,7 +704,9 @@ ); } } + } + fn validate_members(&mut self) -> CargoResult<()> { for member in self.members.clone() { let root = self.find_root(&member)?; if root == self.root_manifest { @@ -665,62 +734,68 @@ } } } + Ok(()) + } + + fn error_if_manifest_not_in_members(&mut self) -> CargoResult<()> { + if self.members.contains(&self.current_manifest) { + return Ok(()); + } - if !self.members.contains(&self.current_manifest) { - let root = self.root_manifest.as_ref().unwrap(); - let root_dir = root.parent().unwrap(); - let current_dir = self.current_manifest.parent().unwrap(); - let root_pkg = self.packages.get(root); - - // FIXME: Make this more generic by using a relative path resolver between member and - // root. 
- let members_msg = match current_dir.strip_prefix(root_dir) { - Ok(rel) => format!( - "this may be fixable by adding `{}` to the \ + let root = self.root_manifest.as_ref().unwrap(); + let root_dir = root.parent().unwrap(); + let current_dir = self.current_manifest.parent().unwrap(); + let root_pkg = self.packages.get(root); + + // FIXME: Make this more generic by using a relative path resolver between member and root. + let members_msg = match current_dir.strip_prefix(root_dir) { + Ok(rel) => format!( + "this may be fixable by adding `{}` to the \ `workspace.members` array of the manifest \ located at: {}", - rel.display(), - root.display() - ), - Err(_) => format!( - "this may be fixable by adding a member to \ + rel.display(), + root.display() + ), + Err(_) => format!( + "this may be fixable by adding a member to \ the `workspace.members` array of the \ manifest located at: {}", - root.display() - ), - }; - let extra = match *root_pkg { - MaybePackage::Virtual(_) => members_msg, - MaybePackage::Package(ref p) => { - let has_members_list = match *p.manifest().workspace_config() { - WorkspaceConfig::Root(ref root_config) => root_config.has_members_list(), - WorkspaceConfig::Member { .. } => unreachable!(), - }; - if !has_members_list { - format!( - "this may be fixable by ensuring that this \ + root.display() + ), + }; + let extra = match *root_pkg { + MaybePackage::Virtual(_) => members_msg, + MaybePackage::Package(ref p) => { + let has_members_list = match *p.manifest().workspace_config() { + WorkspaceConfig::Root(ref root_config) => root_config.has_members_list(), + WorkspaceConfig::Member { .. } => unreachable!(), + }; + if !has_members_list { + format!( + "this may be fixable by ensuring that this \ crate is depended on by the workspace \ root: {}", - root.display() - ) - } else { - members_msg - } + root.display() + ) + } else { + members_msg } - }; - anyhow::bail!( - "current package believes it's in a workspace when it's not:\n\ + } + }; + anyhow::bail!( + "current package believes it's in a workspace when it's not:\n\ current: {}\n\ workspace: {}\n\n{}\n\ Alternatively, to keep it out of the workspace, add the package \ to the `workspace.exclude` array, or add an empty `[workspace]` \ table to the package's manifest.", - self.current_manifest.display(), - root.display(), - extra - ); - } + self.current_manifest.display(), + root.display(), + extra + ); + } + fn validate_manifest(&mut self) -> CargoResult<()> { if let Some(ref root_manifest) = self.root_manifest { for pkg in self .members() @@ -749,9 +824,14 @@ if !manifest.patch().is_empty() { emit_warning("patch")?; } + if manifest.resolve_behavior().is_some() + && manifest.resolve_behavior() != self.resolve_behavior + { + // Only warn if they don't match. 
+ emit_warning("resolver")?; + } } } - Ok(()) } @@ -815,7 +895,7 @@ let err = anyhow::format_err!("{}", warning.message); let cx = anyhow::format_err!("failed to parse manifest at `{}`", path.display()); - return Err(err.context(cx).into()); + return Err(err.context(cx)); } else { let msg = if self.root_manifest.is_none() { warning.message.to_string() @@ -859,60 +939,144 @@ .map(|m| (m, RequestedFeatures::new_all(true))) .collect()); } - if self.config().cli_unstable().package_features { - if specs.len() > 1 && !requested_features.features.is_empty() { - anyhow::bail!("cannot specify features for more than one package"); + if self.allows_unstable_package_features() { + self.members_with_features_pf(specs, requested_features) + } else { + self.members_with_features_stable(specs, requested_features) + } + } + + /// New command-line feature selection with -Zpackage-features. + fn members_with_features_pf( + &self, + specs: &[PackageIdSpec], + requested_features: &RequestedFeatures, + ) -> CargoResult> { + // Keep track of which features matched *any* member, to produce an error + // if any of them did not match anywhere. + let mut found: BTreeSet = BTreeSet::new(); + + // Returns the requested features for the given member. + // This filters out any named features that the member does not have. + let mut matching_features = |member: &Package| -> RequestedFeatures { + if requested_features.features.is_empty() || requested_features.all_features { + return requested_features.clone(); } - let members: Vec<(&Package, RequestedFeatures)> = self + // Only include features this member defines. + let summary = member.summary(); + let member_features = summary.features(); + let mut features = BTreeSet::new(); + + // Checks if a member contains the given feature. + let contains = |feature: InternedString| -> bool { + member_features.contains_key(&feature) + || summary + .dependencies() + .iter() + .any(|dep| dep.is_optional() && dep.name_in_toml() == feature) + }; + + for feature in requested_features.features.iter() { + let mut split = feature.splitn(2, '/'); + let split = (split.next().unwrap(), split.next()); + if let (pkg, Some(pkg_feature)) = split { + let pkg = InternedString::new(pkg); + let pkg_feature = InternedString::new(pkg_feature); + if summary + .dependencies() + .iter() + .any(|dep| dep.name_in_toml() == pkg) + { + // pkg/feat for a dependency. + // Will rely on the dependency resolver to validate `feat`. + features.insert(*feature); + found.insert(*feature); + } else if pkg == member.name() && contains(pkg_feature) { + // member/feat where "feat" is a feature in member. + features.insert(pkg_feature); + found.insert(*feature); + } + } else if contains(*feature) { + // feature exists in this member. + features.insert(*feature); + found.insert(*feature); + } + } + RequestedFeatures { + features: Rc::new(features), + all_features: false, + uses_default_features: requested_features.uses_default_features, + } + }; + + let members: Vec<(&Package, RequestedFeatures)> = self + .members() + .filter(|m| specs.iter().any(|spec| spec.matches(m.package_id()))) + .map(|m| (m, matching_features(m))) + .collect(); + if members.is_empty() { + // `cargo build -p foo`, where `foo` is not a member. + // Do not allow any command-line flags (defaults only). 
+ if !(requested_features.features.is_empty() + && !requested_features.all_features + && requested_features.uses_default_features) + { + anyhow::bail!("cannot specify features for packages outside of workspace"); + } + // Add all members from the workspace so we can ensure `-p nonmember` + // is in the resolve graph. + return Ok(self .members() - .filter(|m| specs.iter().any(|spec| spec.matches(m.package_id()))) - .map(|m| (m, requested_features.clone())) + .map(|m| (m, RequestedFeatures::new_all(false))) + .collect()); + } + if *requested_features.features != found { + let missing: Vec<_> = requested_features + .features + .difference(&found) + .copied() .collect(); - if members.is_empty() { - // `cargo build -p foo`, where `foo` is not a member. - // Do not allow any command-line flags (defaults only). - if !(requested_features.features.is_empty() - && !requested_features.all_features - && requested_features.uses_default_features) - { - anyhow::bail!("cannot specify features for packages outside of workspace"); + // TODO: typo suggestions would be good here. + anyhow::bail!( + "none of the selected packages contains these features: {}", + missing.join(", ") + ); + } + Ok(members) + } + + /// This is the current "stable" behavior for command-line feature selection. + fn members_with_features_stable( + &self, + specs: &[PackageIdSpec], + requested_features: &RequestedFeatures, + ) -> CargoResult> { + let ms = self.members().filter_map(|member| { + let member_id = member.package_id(); + match self.current_opt() { + // The features passed on the command-line only apply to + // the "current" package (determined by the cwd). + Some(current) if member_id == current.package_id() => { + Some((member, requested_features.clone())) } - // Add all members from the workspace so we can ensure `-p nonmember` - // is in the resolve graph. - return Ok(self - .members() - .map(|m| (m, RequestedFeatures::new_all(false))) - .collect()); - } - Ok(members) - } else { - let ms = self.members().filter_map(|member| { - let member_id = member.package_id(); - match self.current_opt() { - // The features passed on the command-line only apply to - // the "current" package (determined by the cwd). - Some(current) if member_id == current.package_id() => { - Some((member, requested_features.clone())) - } - _ => { - // Ignore members that are not enabled on the command-line. - if specs.iter().any(|spec| spec.matches(member_id)) { - // -p for a workspace member that is not the - // "current" one, don't use the local - // `--features`, only allow `--all-features`. - Some(( - member, - RequestedFeatures::new_all(requested_features.all_features), - )) - } else { - // This member was not requested on the command-line, skip. - None - } + _ => { + // Ignore members that are not enabled on the command-line. + if specs.iter().any(|spec| spec.matches(member_id)) { + // -p for a workspace member that is not the + // "current" one, don't use the local + // `--features`, only allow `--all-features`. + Some(( + member, + RequestedFeatures::new_all(requested_features.all_features), + )) + } else { + // This member was not requested on the command-line, skip. 
+ None } } - }); - Ok(ms.collect()) - } + } + }); + Ok(ms.collect()) } } @@ -988,12 +1152,14 @@ members: &Option>, default_members: &Option>, exclude: &Option>, + custom_metadata: &Option, ) -> WorkspaceRootConfig { WorkspaceRootConfig { root_dir: root_dir.to_path_buf(), members: members.clone(), default_members: default_members.clone(), exclude: exclude.clone().unwrap_or_default(), + custom_metadata: custom_metadata.clone(), } } diff -Nru cargo-0.44.1/src/cargo/lib.rs cargo-0.47.0/src/cargo/lib.rs --- cargo-0.44.1/src/cargo/lib.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/lib.rs 2020-07-17 20:39:39.000000000 +0000 @@ -28,13 +28,12 @@ #![allow(clippy::unneeded_field_pattern)] // false positives in target-specific code, for details see // https://github.com/rust-lang/cargo/pull/7251#pullrequestreview-274914270 -#![allow(clippy::identity_conversion)] +#![allow(clippy::useless_conversion)] use crate::core::shell::Verbosity::Verbose; use crate::core::Shell; use anyhow::Error; use log::debug; -use serde::ser; use std::fmt; pub use crate::util::errors::{InternalError, VerboseError}; @@ -93,11 +92,6 @@ } } -pub fn print_json(obj: &T) { - let encoded = serde_json::to_string(&obj).unwrap(); - println!("{}", encoded); -} - pub fn exit_with_error(err: CliError, shell: &mut Shell) -> ! { debug!("exit_with_error; err={:?}", err); if let Some(ref err) = err.error { @@ -117,7 +111,7 @@ /// Displays an error, and all its causes, to stderr. pub fn display_error(err: &Error, shell: &mut Shell) { debug!("display_error; err={:?}", err); - let has_verbose = _display_error(err, shell); + let has_verbose = _display_error(err, shell, true); if has_verbose { drop(writeln!( shell.err(), @@ -140,7 +134,15 @@ } } -fn _display_error(err: &Error, shell: &mut Shell) -> bool { +/// Displays a warning, with an error object providing detailed information +/// and context. +pub fn display_warning_with_error(warning: &str, err: &Error, shell: &mut Shell) { + drop(shell.warn(warning)); + drop(writeln!(shell.err())); + _display_error(err, shell, false); +} + +fn _display_error(err: &Error, shell: &mut Shell, as_err: bool) -> bool { let verbosity = shell.verbosity(); let is_verbose = |e: &(dyn std::error::Error + 'static)| -> bool { verbosity != Verbose && e.downcast_ref::().is_some() @@ -149,7 +151,11 @@ if is_verbose(err.as_ref()) { return true; } - drop(shell.error(&err)); + if as_err { + drop(shell.error(&err)); + } else { + drop(writeln!(shell.err(), "{}", err)); + } for cause in err.chain().skip(1) { // If we're not in verbose mode then print remaining errors until one // marked as `VerboseError` appears. 
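The `-Zpackage-features` selection in `members_with_features_pf` above matches each requested feature against a member's own features and optional dependencies, splitting `pkg/feat` values once on `/`. A small standalone sketch of just that parsing step follows; `split_feature` is a hypothetical helper for illustration, not cargo's real code.

// `serde/derive` selects the `derive` feature of the `serde` dependency;
// a bare `default` names a feature (or optional dependency) of the member itself.
fn split_feature(spec: &str) -> (&str, Option<&str>) {
    let mut parts = spec.splitn(2, '/');
    (parts.next().unwrap(), parts.next())
}

fn main() {
    assert_eq!(split_feature("serde/derive"), ("serde", Some("derive")));
    assert_eq!(split_feature("default"), ("default", None));
}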
@@ -157,7 +163,13 @@ return true; } drop(writeln!(shell.err(), "\nCaused by:")); - drop(writeln!(shell.err(), " {}", cause)); + for line in cause.to_string().lines() { + if line.is_empty() { + drop(writeln!(shell.err(), "")); + } else { + drop(writeln!(shell.err(), " {}", line)); + } + } } false } diff -Nru cargo-0.44.1/src/cargo/ops/cargo_clean.rs cargo-0.47.0/src/cargo/ops/cargo_clean.rs --- cargo-0.44.1/src/cargo/ops/cargo_clean.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_clean.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,25 +1,20 @@ -use crate::core::InternedString; -use std::collections::HashMap; -use std::fs; -use std::path::Path; - -use crate::core::compiler::unit_dependencies; -use crate::core::compiler::{BuildConfig, BuildContext, CompileKind, CompileMode, Context}; -use crate::core::compiler::{RustcTargetData, UnitInterner}; -use crate::core::profiles::{Profiles, UnitFor}; -use crate::core::resolver::features::{FeatureResolver, HasDevUnits, RequestedFeatures}; -use crate::core::{PackageIdSpec, Workspace}; +use crate::core::compiler::{CompileKind, CompileMode, Layout, RustcTargetData}; +use crate::core::profiles::Profiles; +use crate::core::{PackageIdSpec, TargetKind, Workspace}; use crate::ops; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::paths; use crate::util::Config; +use std::fs; +use std::path::Path; pub struct CleanOptions<'a> { pub config: &'a Config, /// A list of packages to clean. If empty, everything is cleaned. pub spec: Vec, /// The target arch triple to clean, or None for the host arch - pub target: Option, + pub targets: Vec, /// Whether to clean the release directory pub profile_specified: bool, /// Whether to clean the directory of a certain build profile @@ -57,115 +52,156 @@ if opts.spec.is_empty() { return rm_rf(&target_dir.into_path_unlocked(), config); } - let (packages, resolve) = ops::resolve_ws(ws)?; - let interner = UnitInterner::new(); - let mut build_config = BuildConfig::new(config, Some(1), &opts.target, CompileMode::Build)?; - build_config.requested_profile = opts.requested_profile; - let target_data = RustcTargetData::new(ws, build_config.requested_kind)?; - let bcx = BuildContext::new( - ws, - &packages, - opts.config, - &build_config, - profiles, - &interner, - HashMap::new(), - target_data, - )?; - let requested_features = RequestedFeatures::new_all(true); - let specs = opts - .spec - .iter() - .map(|spec| PackageIdSpec::parse(spec)) - .collect::>>()?; - let features = FeatureResolver::resolve( - ws, - &bcx.target_data, - &resolve, - &requested_features, - &specs, - bcx.build_config.requested_kind, - HasDevUnits::Yes, - )?; - let mut units = Vec::new(); - - for spec in opts.spec.iter() { - // Translate the spec to a Package - let pkgid = resolve.query(spec)?; - let pkg = packages.get_one(pkgid)?; + // Clean specific packages. + let requested_kinds = CompileKind::from_requested_targets(config, &opts.targets)?; + let target_data = RustcTargetData::new(ws, &requested_kinds)?; + let (pkg_set, resolve) = ops::resolve_ws(ws)?; + let prof_dir_name = profiles.get_dir_name(); + let host_layout = Layout::new(ws, None, &prof_dir_name)?; + // Convert requested kinds to a Vec of layouts. 
+ let target_layouts: Vec<(CompileKind, Layout)> = requested_kinds + .into_iter() + .filter_map(|kind| match kind { + CompileKind::Target(target) => match Layout::new(ws, Some(target), &prof_dir_name) { + Ok(layout) => Some(Ok((kind, layout))), + Err(e) => Some(Err(e)), + }, + CompileKind::Host => None, + }) + .collect::>()?; + // A Vec of layouts. This is a little convoluted because there can only be + // one host_layout. + let layouts = if opts.targets.is_empty() { + vec![(CompileKind::Host, &host_layout)] + } else { + target_layouts + .iter() + .map(|(kind, layout)| (*kind, layout)) + .collect() + }; + // Create a Vec that also includes the host for things that need to clean both. + let layouts_with_host: Vec<(CompileKind, &Layout)> = + std::iter::once((CompileKind::Host, &host_layout)) + .chain(layouts.iter().map(|(k, l)| (*k, *l))) + .collect(); + + // Cleaning individual rustdoc crates is currently not supported. + // For example, the search index would need to be rebuilt to fully + // remove it (otherwise you're left with lots of broken links). + // Doc tests produce no output. + + // Get Packages for the specified specs. + let mut pkg_ids = Vec::new(); + for spec_str in opts.spec.iter() { + // Translate the spec to a Package. + let spec = PackageIdSpec::parse(spec_str)?; + if spec.version().is_some() { + config.shell().warn(&format!( + "version qualifier in `-p {}` is ignored, \ + cleaning all versions of `{}` found", + spec_str, + spec.name() + ))?; + } + if spec.url().is_some() { + config.shell().warn(&format!( + "url qualifier in `-p {}` ignored, \ + cleaning all versions of `{}` found", + spec_str, + spec.name() + ))?; + } + let matches: Vec<_> = resolve.iter().filter(|id| spec.matches(*id)).collect(); + if matches.is_empty() { + anyhow::bail!("package ID specification `{}` matched no packages", spec); + } + pkg_ids.extend(matches); + } + let packages = pkg_set.get_many(pkg_ids)?; + + for pkg in packages { + let pkg_dir = format!("{}-*", pkg.name()); + + // Clean fingerprints. + for (_, layout) in &layouts_with_host { + rm_rf_glob(&layout.fingerprint().join(&pkg_dir), config)?; + } - // Generate all relevant `Unit` targets for this package for target in pkg.targets() { - for kind in [CompileKind::Host, build_config.requested_kind].iter() { - for mode in CompileMode::all_modes() { - for unit_for in UnitFor::all_values() { - let profile = if mode.is_run_custom_build() { - bcx.profiles - .get_profile_run_custom_build(&bcx.profiles.get_profile( - pkg.package_id(), - ws.is_member(pkg), - *unit_for, - CompileMode::Build, - )) - } else { - bcx.profiles.get_profile( - pkg.package_id(), - ws.is_member(pkg), - *unit_for, - *mode, - ) - }; - // Use unverified here since this is being more - // exhaustive than what is actually needed. - let features_for = unit_for.map_to_features_for(); - let features = - features.activated_features_unverified(pkg.package_id(), features_for); - units.push(bcx.units.intern( - pkg, target, profile, *kind, *mode, features, /*is_std*/ false, - )); + if target.is_custom_build() { + // Get both the build_script_build and the output directory. 
+ for (_, layout) in &layouts_with_host { + rm_rf_glob(&layout.build().join(&pkg_dir), config)?; + } + continue; + } + let crate_name = target.crate_name(); + for &mode in &[ + CompileMode::Build, + CompileMode::Test, + CompileMode::Check { test: false }, + ] { + for (compile_kind, layout) in &layouts { + let triple = target_data.short_name(compile_kind); + + let (file_types, _unsupported) = target_data + .info(*compile_kind) + .rustc_outputs(mode, target.kind(), triple)?; + let (dir, uplift_dir) = match target.kind() { + TargetKind::ExampleBin | TargetKind::ExampleLib(..) => { + (layout.examples(), Some(layout.examples())) + } + // Tests/benchmarks are never uplifted. + TargetKind::Test | TargetKind::Bench => (layout.deps(), None), + _ => (layout.deps(), Some(layout.dest())), + }; + for file_type in file_types { + // Some files include a hash in the filename, some don't. + let hashed_name = file_type.output_filename(target, Some("*")); + let unhashed_name = file_type.output_filename(target, None); + rm_rf_glob(&dir.join(&hashed_name), config)?; + rm_rf(&dir.join(&unhashed_name), config)?; + // Remove dep-info file generated by rustc. It is not tracked in + // file_types. It does not have a prefix. + let hashed_dep_info = dir.join(format!("{}-*.d", crate_name)); + let unhashed_dep_info = dir.join(format!("{}.d", crate_name)); + rm_rf_glob(&hashed_dep_info, config)?; + rm_rf(&unhashed_dep_info, config)?; + + // Remove the uplifted copy. + if let Some(uplift_dir) = uplift_dir { + let uplifted_path = uplift_dir.join(file_type.uplift_filename(target)); + rm_rf(&uplifted_path, config)?; + // Dep-info generated by Cargo itself. + let dep_info = uplifted_path.with_extension("d"); + rm_rf(&dep_info, config)?; + } } + // TODO: what to do about build_script_build? + let incremental = layout.incremental().join(format!("{}-*", crate_name)); + rm_rf_glob(&incremental, config)?; } } } } - let unit_dependencies = - unit_dependencies::build_unit_dependencies(&bcx, &resolve, &features, None, &units, &[])?; - let mut cx = Context::new(config, &bcx, unit_dependencies, build_config.requested_kind)?; - cx.prepare_units(None, &units)?; - - for unit in units.iter() { - if unit.mode.is_doc() || unit.mode.is_doc_test() { - // Cleaning individual rustdoc crates is currently not supported. - // For example, the search index would need to be rebuilt to fully - // remove it (otherwise you're left with lots of broken links). - // Doc tests produce no output. - continue; - } - rm_rf(&cx.files().fingerprint_dir(unit), config)?; - if unit.target.is_custom_build() { - if unit.mode.is_run_custom_build() { - rm_rf(&cx.files().build_script_out_dir(unit), config)?; - } else { - rm_rf(&cx.files().build_script_dir(unit), config)?; - } - continue; - } + Ok(()) +} - for output in cx.outputs(unit)?.iter() { - rm_rf(&output.path, config)?; - if let Some(ref dst) = output.hardlink { - rm_rf(dst, config)?; - } - } +fn rm_rf_glob(pattern: &Path, config: &Config) -> CargoResult<()> { + // TODO: Display utf8 warning to user? Or switch to globset? + let pattern = pattern + .to_str() + .ok_or_else(|| anyhow::anyhow!("expected utf-8 path"))?; + for path in glob::glob(pattern)? 
{ + rm_rf(&path?, config)?; } - Ok(()) } fn rm_rf(path: &Path, config: &Config) -> CargoResult<()> { - let m = fs::metadata(path); + let m = fs::symlink_metadata(path); if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { config .shell() diff -Nru cargo-0.44.1/src/cargo/ops/cargo_compile.rs cargo-0.47.0/src/cargo/ops/cargo_compile.rs --- cargo-0.44.1/src/cargo/ops/cargo_compile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_compile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -7,14 +7,13 @@ //! rough outline is: //! //! - Resolve the dependency graph (see `ops::resolve`). -//! - Download any packages needed (see `PackageSet`). Note that dependency -//! downloads are deferred until `build_unit_dependencies`. +//! - Download any packages needed (see `PackageSet`). //! - Generate a list of top-level "units" of work for the targets the user //! requested on the command-line. Each `Unit` corresponds to a compiler //! invocation. This is done in this module (`generate_targets`). +//! - Build the graph of `Unit` dependencies (see +//! `core::compiler::context::unit_dependencies`). //! - Create a `Context` which will perform the following steps: -//! - Build the graph of `Unit` dependencies (see -//! `core::compiler::context::unit_dependencies`). //! - Prepare the `target` directory (see `Layout`). //! - Create a job queue (see `JobQueue`). The queue checks the //! fingerprint of each `Unit` to determine if it should run or be @@ -25,18 +24,17 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use std::iter::FromIterator; -use std::path::PathBuf; use std::sync::Arc; -use crate::core::compiler::standard_lib; use crate::core::compiler::unit_dependencies::build_unit_dependencies; +use crate::core::compiler::{standard_lib, unit_graph}; use crate::core::compiler::{BuildConfig, BuildContext, Compilation, Context}; use crate::core::compiler::{CompileKind, CompileMode, RustcTargetData, Unit}; use crate::core::compiler::{DefaultExecutor, Executor, UnitInterner}; use crate::core::profiles::{Profiles, UnitFor}; use crate::core::resolver::features::{self, FeaturesFor}; use crate::core::resolver::{HasDevUnits, Resolve, ResolveOpts}; -use crate::core::{LibKind, Package, PackageSet, Target}; +use crate::core::{Package, PackageSet, Target}; use crate::core::{PackageId, PackageIdSpec, TargetKind, Workspace}; use crate::ops; use crate::ops::resolve::WorkspaceResolve; @@ -44,9 +42,15 @@ use crate::util::{closest_msg, profile, CargoResult}; /// Contains information about how a package should be compiled. +/// +/// Note on distinction between `CompileOptions` and `BuildConfig`: +/// `BuildConfig` contains values that need to be retained after +/// `BuildContext` is created. The other fields are no longer necessary. Think +/// of it as `CompileOptions` are high-level settings requested on the +/// command-line, and `BuildConfig` are low-level settings for actually +/// driving `rustc`. #[derive(Debug)] -pub struct CompileOptions<'a> { - pub config: &'a Config, +pub struct CompileOptions { /// Configuration information for a rustc build pub build_config: BuildConfig, /// Extra features to build for the root package @@ -70,19 +74,12 @@ /// Whether the `--document-private-items` flags was specified and should /// be forwarded to `rustdoc`. pub rustdoc_document_private_items: bool, - /// The directory to copy final artifacts to. Note that even if `out_dir` is - /// set, a copy of artifacts still could be found a `target/(debug\release)` - /// as usual. 
- // Note that, although the cmd-line flag name is `out-dir`, in code we use - // `export_dir`, to avoid confusion with out dir at `target/debug/deps`. - pub export_dir: Option, } -impl<'a> CompileOptions<'a> { - pub fn new(config: &'a Config, mode: CompileMode) -> CargoResult> { +impl<'a> CompileOptions { + pub fn new(config: &Config, mode: CompileMode) -> CargoResult { Ok(CompileOptions { - config, - build_config: BuildConfig::new(config, None, &None, mode)?, + build_config: BuildConfig::new(config, None, &[], mode)?, features: Vec::new(), all_features: false, no_default_features: false, @@ -94,7 +91,6 @@ target_rustc_args: None, local_rustdoc_args: None, rustdoc_document_private_items: false, - export_dir: None, }) } } @@ -241,10 +237,7 @@ }, } -pub fn compile<'a>( - ws: &Workspace<'a>, - options: &CompileOptions<'a>, -) -> CargoResult> { +pub fn compile<'a>(ws: &Workspace<'a>, options: &CompileOptions) -> CargoResult> { let exec: Arc = Arc::new(DefaultExecutor); compile_with_exec(ws, options, &exec) } @@ -253,7 +246,7 @@ /// calls and add custom logic. `compile` uses `DefaultExecutor` which just passes calls through. pub fn compile_with_exec<'a>( ws: &Workspace<'a>, - options: &CompileOptions<'a>, + options: &CompileOptions, exec: &Arc, ) -> CargoResult> { ws.emit_warnings()?; @@ -262,11 +255,27 @@ pub fn compile_ws<'a>( ws: &Workspace<'a>, - options: &CompileOptions<'a>, + options: &CompileOptions, exec: &Arc, ) -> CargoResult> { + let interner = UnitInterner::new(); + let bcx = create_bcx(ws, options, &interner)?; + if options.build_config.unit_graph { + unit_graph::emit_serialized_unit_graph(&bcx.roots, &bcx.unit_graph)?; + return Ok(Compilation::new(&bcx)?); + } + + let _p = profile::start("compiling"); + let cx = Context::new(&bcx)?; + cx.compile(exec) +} + +pub fn create_bcx<'a, 'cfg>( + ws: &'a Workspace<'cfg>, + options: &'a CompileOptions, + interner: &'a UnitInterner, +) -> CargoResult> { let CompileOptions { - config, ref build_config, ref spec, ref features, @@ -277,8 +286,8 @@ ref target_rustc_args, ref local_rustdoc_args, rustdoc_document_private_items, - ref export_dir, } = *options; + let config = ws.config(); match build_config.mode { CompileMode::Test @@ -301,13 +310,7 @@ } } - let profiles = Profiles::new( - ws.profiles(), - config, - build_config.requested_profile, - ws.features(), - )?; - let target_data = RustcTargetData::new(ws, build_config.requested_kind)?; + let target_data = RustcTargetData::new(ws, &build_config.requested_kinds)?; let specs = spec.to_package_id_specs(ws)?; let dev_deps = ws.require_optional_deps() || filter.need_dev_deps(build_config.mode); @@ -320,10 +323,11 @@ let resolve = ops::resolve_ws_with_opts( ws, &target_data, - build_config.requested_kind, + &build_config.requested_kinds, &opts, &specs, has_dev_units, + crate::core::resolver::features::ForceAllTargets::No, )?; let WorkspaceResolve { mut pkg_set, @@ -338,15 +342,14 @@ .shell() .warn("-Zbuild-std does not currently fully support --build-plan")?; } - if build_config.requested_kind.is_host() { + if build_config.requested_kinds[0].is_host() { // TODO: This should eventually be fixed. Unfortunately it is not // easy to get the host triple in BuildConfig. Consider changing // requested_target to an enum, or some other approach. 
anyhow::bail!("-Zbuild-std requires --target"); } - let (mut std_package_set, std_resolve, std_features) = - standard_lib::resolve_std(ws, &target_data, build_config.requested_kind, crates)?; - remove_dylib_crate_type(&mut std_package_set)?; + let (std_package_set, std_resolve, std_features) = + standard_lib::resolve_std(ws, &target_data, &build_config.requested_kinds, crates)?; pkg_set.add_set(std_package_set); Some((std_resolve, std_features)) } else { @@ -355,11 +358,8 @@ // Find the packages in the resolver that the user wants to build (those // passed in with `-p` or the defaults from the workspace), and convert - // Vec to a Vec<&PackageId>. - let to_build_ids = specs - .iter() - .map(|s| s.query(resolve.iter())) - .collect::>>()?; + // Vec to a Vec. + let to_build_ids = resolve.specs_to_ids(&specs)?; // Now get the `Package` for each `PackageId`. This may trigger a download // if the user specified `-p` for a dependency that is not downloaded. // Dependencies will be downloaded during build_unit_dependencies. @@ -398,32 +398,29 @@ ); } + let profiles = Profiles::new( + ws.profiles(), + config, + build_config.requested_profile, + ws.features(), + )?; profiles.validate_packages( ws.profiles(), &mut config.shell(), workspace_resolve.as_ref().unwrap_or(&resolve), )?; - let interner = UnitInterner::new(); - let mut bcx = BuildContext::new( - ws, - &pkg_set, - config, - build_config, - profiles, - &interner, - HashMap::new(), - target_data, - )?; - let units = generate_targets( ws, &to_builds, filter, - build_config.requested_kind, + &build_config.requested_kinds, + build_config.mode, &resolve, &resolved_features, - &bcx, + &pkg_set, + &profiles, + interner, )?; let std_roots = if let Some(crates) = &config.cli_unstable().build_std { @@ -441,16 +438,19 @@ } let (std_resolve, std_features) = std_resolve_features.as_ref().unwrap(); standard_lib::generate_std_roots( - &bcx, &crates, std_resolve, std_features, - build_config.requested_kind, + &build_config.requested_kinds, + &pkg_set, + interner, + &profiles, )? } else { - Vec::new() + Default::default() }; + let mut extra_compiler_args = HashMap::new(); if let Some(args) = extra_args { if units.len() != 1 { anyhow::bail!( @@ -460,7 +460,7 @@ extra_args_name ); } - bcx.extra_compiler_args.insert(units[0], args); + extra_compiler_args.insert(units[0].clone(), args); } for unit in &units { if unit.mode.is_doc() || unit.mode.is_doc_test() { @@ -476,27 +476,40 @@ } if let Some(args) = extra_args { - bcx.extra_compiler_args.insert(*unit, args.clone()); + extra_compiler_args + .entry(unit.clone()) + .or_default() + .extend(args); } } } - let unit_dependencies = build_unit_dependencies( - &bcx, + let unit_graph = build_unit_dependencies( + ws, + &pkg_set, &resolve, &resolved_features, std_resolve_features.as_ref(), &units, &std_roots, + build_config.mode, + &target_data, + &profiles, + interner, )?; - let ret = { - let _p = profile::start("compiling"); - let cx = Context::new(config, &bcx, unit_dependencies, build_config.requested_kind)?; - cx.compile(&units, export_dir.clone(), exec)? - }; + let bcx = BuildContext::new( + ws, + pkg_set, + build_config, + profiles, + extra_compiler_args, + target_data, + units, + unit_graph, + )?; - Ok(ret) + Ok(bcx) } impl FilterRule { @@ -607,16 +620,18 @@ pub fn need_dev_deps(&self, mode: CompileMode) -> bool { match mode { CompileMode::Test | CompileMode::Doctest | CompileMode::Bench => true, - CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { .. 
} => match *self - { - CompileFilter::Default { .. } => false, - CompileFilter::Only { - ref examples, - ref tests, - ref benches, - .. - } => examples.is_specific() || tests.is_specific() || benches.is_specific(), - }, + CompileMode::Check { test: true } => true, + CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { test: false } => { + match *self { + CompileFilter::Default { .. } => false, + CompileFilter::Only { + ref examples, + ref tests, + ref benches, + .. + } => examples.is_specific() || tests.is_specific() || benches.is_specific(), + } + } CompileMode::RunCustomBuild => panic!("Invalid mode"), } } @@ -679,90 +694,103 @@ /// Generates all the base targets for the packages the user has requested to /// compile. Dependencies for these targets are computed later in `unit_dependencies`. -fn generate_targets<'a>( +fn generate_targets( ws: &Workspace<'_>, - packages: &[&'a Package], + packages: &[&Package], filter: &CompileFilter, - default_arch_kind: CompileKind, - resolve: &'a Resolve, + requested_kinds: &[CompileKind], + mode: CompileMode, + resolve: &Resolve, resolved_features: &features::ResolvedFeatures, - bcx: &BuildContext<'a, '_>, -) -> CargoResult>> { - // Helper for creating a `Unit` struct. - let new_unit = |pkg: &'a Package, target: &'a Target, target_mode: CompileMode| { - let unit_for = if target_mode.is_any_test() { - // NOTE: the `UnitFor` here is subtle. If you have a profile - // with `panic` set, the `panic` flag is cleared for - // tests/benchmarks and their dependencies. If this - // was `normal`, then the lib would get compiled three - // times (once with panic, once without, and once with - // `--test`). - // - // This would cause a problem for doc tests, which would fail - // because `rustdoc` would attempt to link with both libraries - // at the same time. Also, it's probably not important (or - // even desirable?) for rustdoc to link with a lib with - // `panic` set. - // - // As a consequence, Examples and Binaries get compiled - // without `panic` set. This probably isn't a bad deal. - // - // Forcing the lib to be compiled three times during `cargo - // test` is probably also not desirable. - UnitFor::new_test(bcx.config) - } else if target.for_host() { - // Proc macro / plugin should not have `panic` set. - UnitFor::new_compiler() - } else { - UnitFor::new_normal() - }; - // Custom build units are added in `build_unit_dependencies`. - assert!(!target.is_custom_build()); - let target_mode = match target_mode { - CompileMode::Test => { - if target.is_example() && !filter.is_specific() && !target.tested() { - // Examples are included as regular binaries to verify - // that they compile. - CompileMode::Build - } else { - CompileMode::Test + package_set: &PackageSet<'_>, + profiles: &Profiles, + interner: &UnitInterner, +) -> CargoResult> { + let config = ws.config(); + // Helper for creating a list of `Unit` structures + let new_unit = + |units: &mut HashSet, pkg: &Package, target: &Target, target_mode: CompileMode| { + let unit_for = if target_mode.is_any_test() { + // NOTE: the `UnitFor` here is subtle. If you have a profile + // with `panic` set, the `panic` flag is cleared for + // tests/benchmarks and their dependencies. If this + // was `normal`, then the lib would get compiled three + // times (once with panic, once without, and once with + // `--test`). + // + // This would cause a problem for doc tests, which would fail + // because `rustdoc` would attempt to link with both libraries + // at the same time. 
Also, it's probably not important (or + // even desirable?) for rustdoc to link with a lib with + // `panic` set. + // + // As a consequence, Examples and Binaries get compiled + // without `panic` set. This probably isn't a bad deal. + // + // Forcing the lib to be compiled three times during `cargo + // test` is probably also not desirable. + UnitFor::new_test(config) + } else if target.for_host() { + // Proc macro / plugin should not have `panic` set. + UnitFor::new_compiler() + } else { + UnitFor::new_normal() + }; + // Custom build units are added in `build_unit_dependencies`. + assert!(!target.is_custom_build()); + let target_mode = match target_mode { + CompileMode::Test => { + if target.is_example() && !filter.is_specific() && !target.tested() { + // Examples are included as regular binaries to verify + // that they compile. + CompileMode::Build + } else { + CompileMode::Test + } } + CompileMode::Build => match *target.kind() { + TargetKind::Test => CompileMode::Test, + TargetKind::Bench => CompileMode::Bench, + _ => CompileMode::Build, + }, + // `CompileMode::Bench` is only used to inform `filter_default_targets` + // which command is being used (`cargo bench`). Afterwards, tests + // and benches are treated identically. Switching the mode allows + // de-duplication of units that are essentially identical. For + // example, `cargo build --all-targets --release` creates the units + // (lib profile:bench, mode:test) and (lib profile:bench, mode:bench) + // and since these are the same, we want them to be de-duplicated in + // `unit_dependencies`. + CompileMode::Bench => CompileMode::Test, + _ => target_mode, + }; + + let is_local = pkg.package_id().source_id().is_path(); + let profile = profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + is_local, + unit_for, + target_mode, + ); + + // No need to worry about build-dependencies, roots are never build dependencies. + let features_for = FeaturesFor::from_for_host(target.proc_macro()); + let features = resolved_features.activated_features(pkg.package_id(), features_for); + + for kind in requested_kinds { + let unit = interner.intern( + pkg, + target, + profile, + kind.for_target(target), + target_mode, + features.clone(), + /*is_std*/ false, + ); + units.insert(unit); } - CompileMode::Build => match *target.kind() { - TargetKind::Test => CompileMode::Test, - TargetKind::Bench => CompileMode::Bench, - _ => CompileMode::Build, - }, - // `CompileMode::Bench` is only used to inform `filter_default_targets` - // which command is being used (`cargo bench`). Afterwards, tests - // and benches are treated identically. Switching the mode allows - // de-duplication of units that are essentially identical. For - // example, `cargo build --all-targets --release` creates the units - // (lib profile:bench, mode:test) and (lib profile:bench, mode:bench) - // and since these are the same, we want them to be de-duplicated in - // `unit_dependencies`. - CompileMode::Bench => CompileMode::Test, - _ => target_mode, }; - let kind = default_arch_kind.for_target(target); - let profile = - bcx.profiles - .get_profile(pkg.package_id(), ws.is_member(pkg), unit_for, target_mode); - - let features = Vec::from(resolved_features.activated_features( - pkg.package_id(), - FeaturesFor::NormalOrDev, // Root units are never build dependencies. - )); - bcx.units.intern( - pkg, - target, - profile, - kind, - target_mode, - features, - /*is_std*/ false, - ) - }; // Create a list of proposed targets. 
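A toy version of the Bench-to-Test normalization described in the comment above; once the mode is folded, otherwise identical units collapse in a set:

    use std::collections::HashSet;

    // Illustration only; cargo's CompileMode carries more variants.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    enum CompileMode {
        Test,
        Bench,
    }

    fn normalize(mode: CompileMode) -> CompileMode {
        match mode {
            // After target selection, tests and benches are handled identically.
            CompileMode::Bench => CompileMode::Test,
            other => other,
        }
    }

    fn main() {
        let mut units: HashSet<(&str, CompileMode)> = HashSet::new();
        // `cargo build --all-targets --release` would otherwise propose the
        // lib twice, once per mode; normalizing makes the entries identical.
        units.insert(("lib", normalize(CompileMode::Test)));
        units.insert(("lib", normalize(CompileMode::Bench)));
        assert_eq!(units.len(), 1);
    }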
let mut proposals: Vec> = Vec::new(); @@ -772,14 +800,14 @@ required_features_filterable, } => { for pkg in packages { - let default = filter_default_targets(pkg.targets(), bcx.build_config.mode); + let default = filter_default_targets(pkg.targets(), mode); proposals.extend(default.into_iter().map(|target| Proposal { pkg, target, requires_features: !required_features_filterable, - mode: bcx.build_config.mode, + mode, })); - if bcx.build_config.mode == CompileMode::Test { + if mode == CompileMode::Test { if let Some(t) = pkg .targets() .iter() @@ -805,14 +833,14 @@ } => { if *lib != LibRule::False { let mut libs = Vec::new(); - for proposal in - filter_targets(packages, Target::is_lib, false, bcx.build_config.mode) - { + for proposal in filter_targets(packages, Target::is_lib, false, mode) { let Proposal { target, pkg, .. } = proposal; - if bcx.build_config.mode.is_doc_test() && !target.doctestable() { + if mode.is_doc_test() && !target.doctestable() { + let types = target.rustc_crate_types(); + let types_str: Vec<&str> = types.iter().map(|t| t.as_str()).collect(); ws.config().shell().warn(format!( "doc tests are not supported for crate type(s) `{}` in package `{}`", - target.rustc_crate_types().join(", "), + types_str.join(", "), pkg.name() ))?; } else { @@ -836,10 +864,10 @@ FilterRule::All => Target::tested, FilterRule::Just(_) => Target::is_test, }; - let test_mode = match bcx.build_config.mode { + let test_mode = match mode { CompileMode::Build => CompileMode::Test, CompileMode::Check { .. } => CompileMode::Check { test: true }, - _ => bcx.build_config.mode, + _ => mode, }; // If `--benches` was specified, add all targets that would be // generated by `cargo bench`. @@ -847,10 +875,10 @@ FilterRule::All => Target::benched, FilterRule::Just(_) => Target::is_bench, }; - let bench_mode = match bcx.build_config.mode { + let bench_mode = match mode { CompileMode::Build => CompileMode::Bench, CompileMode::Check { .. } => CompileMode::Check { test: true }, - _ => bcx.build_config.mode, + _ => mode, }; proposals.extend(list_rule_targets( @@ -858,14 +886,14 @@ bins, "bin", Target::is_bin, - bcx.build_config.mode, + mode, )?); proposals.extend(list_rule_targets( packages, examples, "example", Target::is_example, - bcx.build_config.mode, + mode, )?); proposals.extend(list_rule_targets( packages, @@ -902,15 +930,14 @@ let unavailable_features = match target.required_features() { Some(rf) => { let features = features_map.entry(pkg).or_insert_with(|| { - resolve_all_features(resolve, resolved_features, pkg.package_id()) + resolve_all_features(resolve, resolved_features, package_set, pkg.package_id()) }); rf.iter().filter(|f| !features.contains(*f)).collect() } None => Vec::new(), }; if target.is_lib() || unavailable_features.is_empty() { - let unit = new_unit(pkg, target, mode); - units.insert(unit); + new_unit(&mut units, pkg, target, mode); } else if requires_features { let required_features = target.required_features().unwrap(); let quoted_required_features: Vec = required_features @@ -936,9 +963,10 @@ /// /// Dependencies are added as `dep_name/feat_name` because `required-features` /// wants to support that syntax. 
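The `dep_name/feat_name` syntax described above, shown as plain string bookkeeping on made-up package and feature names (this is only the naming scheme, not cargo's feature resolution):

    use std::collections::HashSet;

    fn main() {
        let mut features: HashSet<String> = HashSet::new();
        let dep_name = "serde";
        for feature in &["derive", "std"] {
            features.insert(format!("{}/{}", dep_name, feature));
        }
        assert!(features.contains("serde/derive"));
        assert!(!features.contains("serde/alloc"));
    }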
-fn resolve_all_features( +pub fn resolve_all_features( resolve_with_overrides: &Resolve, resolved_features: &features::ResolvedFeatures, + package_set: &PackageSet<'_>, package_id: PackageId, ) -> HashSet { let mut features: HashSet = resolved_features @@ -950,14 +978,17 @@ // Include features enabled for use by dependencies so targets can also use them with the // required-features field when deciding whether to be built or skipped. for (dep_id, deps) in resolve_with_overrides.deps(package_id) { + let is_proc_macro = package_set + .get_one(dep_id) + .expect("packages downloaded") + .proc_macro(); for dep in deps { - let features_for = if dep.is_build() { - FeaturesFor::BuildDep - } else { - FeaturesFor::NormalOrDev - }; - for feature in resolved_features.activated_features(dep_id, features_for) { - features.insert(dep.name_in_toml().to_string() + "/" + &feature); + let features_for = FeaturesFor::from_for_host(is_proc_macro || dep.is_build()); + for feature in resolved_features + .activated_features_unverified(dep_id, features_for) + .unwrap_or_default() + { + features.insert(format!("{}/{}", dep.name_in_toml(), feature)); } } } @@ -1067,35 +1098,3 @@ } proposals } - -/// When using `-Zbuild-std` we're building the standard library, but a -/// technical detail of the standard library right now is that it builds itself -/// as both an `rlib` and a `dylib`. We don't actually want to really publicize -/// the `dylib` and in general it's a pain to work with, so when building libstd -/// we want to remove the `dylib` crate type. -/// -/// Cargo doesn't have a fantastic way of doing that right now, so let's hack -/// around it a bit and (ab)use the fact that we have mutable access to -/// `PackageSet` here to rewrite downloaded packages. We iterate over all `path` -/// packages (which should download immediately and not actually cause blocking -/// here) and edit their manifests to only list one `LibKind` for an `Rlib`. -fn remove_dylib_crate_type(set: &mut PackageSet<'_>) -> CargoResult<()> { - let ids = set - .package_ids() - .filter(|p| p.source_id().is_path()) - .collect::>(); - set.get_many(ids.iter().cloned())?; - - for id in ids { - let pkg = set.lookup_mut(id).expect("should be downloaded now"); - - for target in pkg.manifest_mut().targets_mut() { - if let TargetKind::Lib(crate_types) = target.kind_mut() { - crate_types.truncate(0); - crate_types.push(LibKind::Rlib); - } - } - } - - Ok(()) -} diff -Nru cargo-0.44.1/src/cargo/ops/cargo_doc.rs cargo-0.47.0/src/cargo/ops/cargo_doc.rs --- cargo-0.44.1/src/cargo/ops/cargo_doc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_doc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -9,15 +9,15 @@ /// Strongly typed options for the `cargo doc` command. #[derive(Debug)] -pub struct DocOptions<'a> { +pub struct DocOptions { /// Whether to attempt to open the browser after compiling the docs pub open_result: bool, /// Options to pass through to the compiler - pub compile_opts: ops::CompileOptions<'a>, + pub compile_opts: ops::CompileOptions, } /// Main method for `cargo doc`. 
-pub fn doc(ws: &Workspace<'_>, options: &DocOptions<'_>) -> CargoResult<()> { +pub fn doc(ws: &Workspace<'_>, options: &DocOptions) -> CargoResult<()> { let specs = options.compile_opts.spec.to_package_id_specs(ws)?; let opts = ResolveOpts::new( /*dev_deps*/ true, @@ -25,21 +25,18 @@ options.compile_opts.all_features, !options.compile_opts.no_default_features, ); - let requested_kind = options.compile_opts.build_config.requested_kind; - let target_data = RustcTargetData::new(ws, requested_kind)?; + let target_data = RustcTargetData::new(ws, &options.compile_opts.build_config.requested_kinds)?; let ws_resolve = ops::resolve_ws_with_opts( ws, &target_data, - requested_kind, + &options.compile_opts.build_config.requested_kinds, &opts, &specs, HasDevUnits::No, + crate::core::resolver::features::ForceAllTargets::No, )?; - let ids = specs - .iter() - .map(|s| s.query(ws_resolve.targeted_resolve.iter())) - .collect::>>()?; + let ids = ws_resolve.targeted_resolve.specs_to_ids(&specs)?; let pkgs = ws_resolve.pkg_set.get_many(ids)?; let mut lib_names = HashMap::new(); @@ -72,20 +69,25 @@ } } + let open_kind = if options.open_result { + Some(options.compile_opts.build_config.single_requested_kind()?) + } else { + None + }; + let compilation = ops::compile(ws, &options.compile_opts)?; - if options.open_result { + if let Some(kind) = open_kind { let name = match names.first() { Some(s) => s.to_string(), None => return Ok(()), }; - let path = compilation - .root_output + let path = compilation.root_output[&kind] .with_file_name("doc") .join(&name) .join("index.html"); if path.exists() { - let mut shell = options.compile_opts.config.shell(); + let mut shell = ws.config().shell(); shell.status("Opening", path.display())?; open_docs(&path, &mut shell)?; } @@ -107,10 +109,8 @@ } None => { if let Err(e) = opener::open(&path) { - shell.warn(format!("Couldn't open docs: {}", e))?; - for cause in anyhow::Error::new(e).chain().skip(1) { - shell.warn(format!("Caused by:\n {}", cause))?; - } + let e = e.into(); + crate::display_warning_with_error("couldn't open docs", &e, shell); } } }; diff -Nru cargo-0.44.1/src/cargo/ops/cargo_fetch.rs cargo-0.47.0/src/cargo/ops/cargo_fetch.rs --- cargo-0.44.1/src/cargo/ops/cargo_fetch.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_fetch.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,4 +1,4 @@ -use crate::core::compiler::{BuildConfig, CompileMode, TargetInfo}; +use crate::core::compiler::{BuildConfig, CompileMode, RustcTargetData}; use crate::core::{PackageSet, Resolve, Workspace}; use crate::ops; use crate::util::CargoResult; @@ -8,7 +8,7 @@ pub struct FetchOptions<'a> { pub config: &'a Config, /// The target arch triple to fetch dependencies for - pub target: Option, + pub targets: Vec, } /// Executes `cargo fetch`. 
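A sketch of the doc-opening path construction in the hunk above; the crate name and output directory are made up, and the call into the `opener` crate is left as a comment so the sketch stays std-only:

    use std::path::Path;

    fn main() {
        let root_output = Path::new("target/debug"); // per-kind output dir
        let path = root_output
            .with_file_name("doc") // target/debug -> target/doc
            .join("my_crate")
            .join("index.html");
        if path.exists() {
            println!("Opening {}", path.display());
            // In cargo this goes through the `opener` crate:
            // opener::open(&path).ok();
        } else {
            println!("{} not built yet", path.display());
        }
    }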
@@ -21,14 +21,8 @@ let jobs = Some(1); let config = ws.config(); - let build_config = BuildConfig::new(config, jobs, &options.target, CompileMode::Build)?; - let rustc = config.load_global_rustc(Some(ws))?; - let target_info = TargetInfo::new( - config, - build_config.requested_kind, - &rustc, - build_config.requested_kind, - )?; + let build_config = BuildConfig::new(config, jobs, &options.targets, CompileMode::Build)?; + let data = RustcTargetData::new(ws, &build_config.requested_kinds)?; let mut fetched_packages = HashSet::new(); let mut deps_to_fetch = ws.members().map(|p| p.package_id()).collect::>(); let mut to_download = Vec::new(); @@ -43,20 +37,21 @@ .deps(id) .filter(|&(_id, deps)| { deps.iter().any(|d| { - // If no target was specified then all dependencies can - // be fetched. - let target = match options.target { - Some(ref t) => t, - None => return true, - }; - // If this dependency is only available for certain - // platforms, make sure we're only fetching it for that - // platform. - let platform = match d.platform() { - Some(p) => p, - None => return true, - }; - platform.matches(target, target_info.cfg()) + // If no target was specified then all dependencies are + // fetched. + if options.targets.is_empty() { + return true; + } + + // Otherwise we only download this dependency if any of the + // requested platforms would match this dependency. Note + // that this is a bit lossy because not all dependencies are + // always compiled for all platforms, but it should be + // "close enough" for now. + build_config + .requested_kinds + .iter() + .any(|kind| data.dep_platform_activated(d, *kind)) }) }) .map(|(id, _deps)| id); diff -Nru cargo-0.44.1/src/cargo/ops/cargo_generate_lockfile.rs cargo-0.47.0/src/cargo/ops/cargo_generate_lockfile.rs --- cargo-0.44.1/src/cargo/ops/cargo_generate_lockfile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_generate_lockfile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,7 +5,7 @@ use crate::core::registry::PackageRegistry; use crate::core::resolver::ResolveOpts; -use crate::core::PackageId; +use crate::core::{PackageId, PackageIdSpec}; use crate::core::{Resolve, SourceId, Workspace}; use crate::ops; use crate::util::config::Config; @@ -79,6 +79,7 @@ if opts.to_update.is_empty() { to_avoid.extend(previous_resolve.iter()); + to_avoid.extend(previous_resolve.unused_patches()); } else { let mut sources = Vec::new(); for name in opts.to_update.iter() { @@ -102,6 +103,11 @@ None => dep.source_id().with_precise(None), }); } + if let Ok(unused_id) = + PackageIdSpec::query_str(name, previous_resolve.unused_patches().iter().cloned()) + { + to_avoid.insert(unused_id); + } } registry.add_sources(sources)?; diff -Nru cargo-0.44.1/src/cargo/ops/cargo_install.rs cargo-0.47.0/src/cargo/ops/cargo_install.rs --- cargo-0.44.1/src/cargo/ops/cargo_install.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_install.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,17 +4,17 @@ use std::{env, fs}; use anyhow::{bail, format_err}; +use semver::VersionReq; use tempfile::Builder as TempFileBuilder; use crate::core::compiler::Freshness; -use crate::core::compiler::{CompileKind, DefaultExecutor, Executor, RustcTargetData}; -use crate::core::resolver::{HasDevUnits, ResolveOpts}; -use crate::core::{Edition, Package, PackageId, PackageIdSpec, Source, SourceId, Workspace}; -use crate::ops; +use crate::core::compiler::{CompileKind, DefaultExecutor, Executor}; +use crate::core::{Dependency, Edition, Package, PackageId, Source, 
SourceId, Workspace}; use crate::ops::common_for_install_and_uninstall::*; -use crate::sources::{GitSource, SourceConfigMap}; +use crate::sources::{GitSource, PathSource, SourceConfigMap}; use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::{paths, Config, Filesystem}; +use crate::util::{paths, Config, Filesystem, Rustc, ToSemver}; +use crate::{drop_println, ops}; struct Transaction { bins: Vec, @@ -35,20 +35,22 @@ } pub fn install( + config: &Config, root: Option<&str>, krates: Vec<&str>, source_id: SourceId, from_cwd: bool, vers: Option<&str>, - opts: &ops::CompileOptions<'_>, + opts: &ops::CompileOptions, force: bool, no_track: bool, ) -> CargoResult<()> { - let root = resolve_root(root, opts.config)?; - let map = SourceConfigMap::new(opts.config)?; + let root = resolve_root(root, config)?; + let map = SourceConfigMap::new(config)?; let (installed_anything, scheduled_error) = if krates.len() <= 1 { install_one( + config, &root, &map, krates.into_iter().next(), @@ -64,11 +66,14 @@ } else { let mut succeeded = vec![]; let mut failed = vec![]; - let mut first = true; + // "Tracks whether or not the source (such as a registry or git repo) has been updated. + // This is used to avoid updating it multiple times when installing multiple crates. + let mut did_update = false; for krate in krates { let root = root.clone(); let map = map.clone(); match install_one( + config, &root, &map, Some(krate), @@ -78,15 +83,19 @@ opts, force, no_track, - first, + !did_update, ) { - Ok(()) => succeeded.push(krate), + Ok(still_needs_update) => { + succeeded.push(krate); + did_update |= !still_needs_update; + } Err(e) => { - crate::display_error(&e, &mut opts.config.shell()); - failed.push(krate) + crate::display_error(&e, &mut config.shell()); + failed.push(krate); + // We assume an update was performed if we got an error. + did_update = true; } } - first = false; } let mut summary = vec![]; @@ -100,7 +109,7 @@ )); } if !succeeded.is_empty() || !failed.is_empty() { - opts.config.shell().status("Summary", summary.join(" "))?; + config.shell().status("Summary", summary.join(" "))?; } (!succeeded.is_empty(), !failed.is_empty()) @@ -117,7 +126,7 @@ } } - opts.config.shell().warn(&format!( + config.shell().warn(&format!( "be sure to add `{}` to your PATH to be \ able to run the installed binaries", dst.display() @@ -131,86 +140,137 @@ Ok(()) } +// Returns whether a subsequent call should attempt to update again. +// The `needs_update_if_source_is_index` parameter indicates whether or not the source index should +// be updated. This is used ensure it is only updated once when installing multiple crates. +// The return value here is used so that the caller knows what to pass to the +// `needs_update_if_source_is_index` parameter when `install_one` is called again. fn install_one( + config: &Config, root: &Filesystem, map: &SourceConfigMap<'_>, krate: Option<&str>, source_id: SourceId, from_cwd: bool, vers: Option<&str>, - opts: &ops::CompileOptions<'_>, + opts: &ops::CompileOptions, force: bool, no_track: bool, - is_first_install: bool, -) -> CargoResult<()> { - let config = opts.config; - - let pkg = if source_id.is_git() { - select_pkg( - GitSource::new(source_id, config)?, - krate, - vers, - config, - true, - &mut |git| git.read_packages(), - )? - } else if source_id.is_path() { - let mut src = path_source(source_id, config)?; - if !src.path().is_dir() { + needs_update_if_source_is_index: bool, +) -> CargoResult { + if let Some(name) = krate { + if name == "." 
{ bail!( - "`{}` is not a directory. \ - --path must point to a directory containing a Cargo.toml file.", - src.path().display() + "To install the binaries for the package in current working \ + directory use `cargo install --path .`. \ + Use `cargo build` if you want to simply build the package." ) } - if !src.path().join("Cargo.toml").exists() { - if from_cwd { - bail!( - "`{}` is not a crate root; specify a crate to \ - install from crates.io, or use --path or --git to \ - specify an alternate source", - src.path().display() - ); + } + + let dst = root.join("bin").into_path_unlocked(); + + let pkg = { + let dep = { + if let Some(krate) = krate { + let vers = if let Some(vers_flag) = vers { + Some(parse_semver_flag(vers_flag)?.to_string()) + } else { + if source_id.is_registry() { + // Avoid pre-release versions from crate.io + // unless explicitly asked for + Some(String::from("*")) + } else { + None + } + }; + Some(Dependency::parse_no_deprecated( + krate, + vers.as_deref(), + source_id, + )?) } else { + None + } + }; + + if source_id.is_git() { + let mut source = GitSource::new(source_id, config)?; + select_pkg( + &mut source, + dep, + |git: &mut GitSource<'_>| git.read_packages(), + config, + )? + } else if source_id.is_path() { + let mut src = path_source(source_id, config)?; + if !src.path().is_dir() { bail!( - "`{}` does not contain a Cargo.toml file. \ - --path must point to a directory containing a Cargo.toml file.", + "`{}` is not a directory. \ + --path must point to a directory containing a Cargo.toml file.", src.path().display() ) } - } - src.update()?; - select_pkg(src, krate, vers, config, false, &mut |path| { - path.read_packages() - })? - } else { - select_pkg( - map.load(source_id, &HashSet::new())?, - krate, - vers, - config, - is_first_install, - &mut |_| { + if !src.path().join("Cargo.toml").exists() { + if from_cwd { + bail!( + "`{}` is not a crate root; specify a crate to \ + install from crates.io, or use --path or --git to \ + specify an alternate source", + src.path().display() + ); + } else { + bail!( + "`{}` does not contain a Cargo.toml file. \ + --path must point to a directory containing a Cargo.toml file.", + src.path().display() + ) + } + } + select_pkg( + &mut src, + dep, + |path: &mut PathSource<'_>| path.read_packages(), + config, + )? + } else { + if let Some(dep) = dep { + let mut source = map.load(source_id, &HashSet::new())?; + if let Ok(Some(pkg)) = installed_exact_package( + dep.clone(), + &mut source, + config, + opts, + root, + &dst, + force, + ) { + let msg = format!( + "package `{}` is already installed, use --force to override", + pkg + ); + config.shell().status("Ignored", &msg)?; + return Ok(true); + } + select_dep_pkg(&mut source, dep, config, needs_update_if_source_is_index)? + } else { bail!( "must specify a crate to install from \ crates.io, or use --path or --git to \ specify alternate source" ) - }, - )? + } + } }; - let (mut ws, git_package) = if source_id.is_git() { + let (mut ws, rustc, target) = make_ws_rustc_target(config, opts, &source_id, pkg.clone())?; + let pkg = if source_id.is_git() { // Don't use ws.current() in order to keep the package source as a git source so that // install tracking uses the correct source. 
- (Workspace::new(pkg.manifest_path(), config)?, Some(&pkg)) - } else if source_id.is_path() { - (Workspace::new(pkg.manifest_path(), config)?, None) + pkg } else { - (Workspace::ephemeral(pkg, config, None, false)?, None) + ws.current()?.clone() }; - ws.set_ignore_lock(config.lock_update_allowed()); - ws.set_require_optional_deps(false); let mut td_opt = None; let mut needs_cleanup = false; @@ -228,8 +288,6 @@ ws.set_target_dir(target_dir); } - let pkg = git_package.map_or_else(|| ws.current(), |pkg| Ok(pkg))?; - if from_cwd { if pkg.manifest().edition() == Edition::Edition2015 { config.shell().warn( @@ -255,19 +313,9 @@ bail!("specified package `{}` has no binaries", pkg); } - // Preflight checks to check up front whether we'll overwrite something. - // We have to check this again afterwards, but may as well avoid building - // anything if we're gonna throw it away anyway. - let dst = root.join("bin").into_path_unlocked(); - let rustc = config.load_global_rustc(Some(&ws))?; - let target = match &opts.build_config.requested_kind { - CompileKind::Host => rustc.host.as_str(), - CompileKind::Target(target) => target.short_name(), - }; - // Helper for --no-track flag to make sure it doesn't overwrite anything. let no_track_duplicates = || -> CargoResult>> { - let duplicates: BTreeMap> = exe_names(pkg, &opts.filter) + let duplicates: BTreeMap> = exe_names(&pkg, &opts.filter) .into_iter() .filter(|name| dst.join(name).exists()) .map(|name| (name, None)) @@ -289,22 +337,17 @@ // Check for conflicts. no_track_duplicates()?; } else { - let tracker = InstallTracker::load(config, root)?; - let (freshness, _duplicates) = - tracker.check_upgrade(&dst, pkg, force, opts, target, &rustc.verbose_version)?; - if freshness == Freshness::Fresh { + if is_installed(&pkg, config, opts, &rustc, &target, root, &dst, force)? { let msg = format!( "package `{}` is already installed, use --force to override", pkg ); config.shell().status("Ignored", &msg)?; - return Ok(()); + return Ok(false); } - // Unlock while building. - drop(tracker); } - config.shell().status("Installing", pkg)?; + config.shell().status("Installing", &pkg)?; check_yanked_install(&ws)?; @@ -325,7 +368,7 @@ let mut binaries: Vec<(&str, &Path)> = compile .binaries .iter() - .map(|bin| { + .map(|(_, bin)| { let name = bin.file_name().unwrap(); if let Some(s) = name.to_str() { Ok((s, bin.as_ref())) @@ -345,7 +388,7 @@ } else { let tracker = InstallTracker::load(config, root)?; let (_freshness, duplicates) = - tracker.check_upgrade(&dst, pkg, force, opts, target, &rustc.verbose_version)?; + tracker.check_upgrade(&dst, &pkg, force, opts, &target, &rustc.verbose_version)?; (Some(tracker), duplicates) }; @@ -363,9 +406,7 @@ if !source_id.is_path() && fs::rename(src, &dst).is_ok() { continue; } - fs::copy(src, &dst).chain_err(|| { - format_err!("failed to copy `{}` to `{}`", src.display(), dst.display()) - })?; + paths::copy(src, &dst)?; } let (to_replace, to_install): (Vec<&str>, Vec<&str>) = binaries @@ -408,15 +449,15 @@ if let Some(mut tracker) = tracker { tracker.mark_installed( - pkg, + &pkg, &successful_bins, vers.map(|s| s.to_string()), opts, - target, + &target, &rustc.verbose_version, ); - if let Err(e) = remove_orphaned_bins(&ws, &mut tracker, &duplicates, pkg, &dst) { + if let Err(e) = remove_orphaned_bins(&ws, &mut tracker, &duplicates, &pkg, &dst) { // Don't hard error on remove. 
config .shell() @@ -458,7 +499,7 @@ "Installed", format!("package `{}` {}", pkg, executables(successful_bins.iter())), )?; - Ok(()) + Ok(false) } else { if !to_install.is_empty() { config.shell().status( @@ -483,7 +524,128 @@ ), )?; } - Ok(()) + Ok(false) + } +} + +fn is_installed( + pkg: &Package, + config: &Config, + opts: &ops::CompileOptions, + rustc: &Rustc, + target: &str, + root: &Filesystem, + dst: &Path, + force: bool, +) -> CargoResult { + let tracker = InstallTracker::load(config, root)?; + let (freshness, _duplicates) = + tracker.check_upgrade(dst, pkg, force, opts, target, &rustc.verbose_version)?; + Ok(freshness == Freshness::Fresh) +} + +/// Checks if vers can only be satisfied by exactly one version of a package in a registry, and it's +/// already installed. If this is the case, we can skip interacting with a registry to check if +/// newer versions may be installable, as no newer version can exist. +fn installed_exact_package( + dep: Dependency, + source: &mut T, + config: &Config, + opts: &ops::CompileOptions, + root: &Filesystem, + dst: &Path, + force: bool, +) -> CargoResult> +where + T: Source, +{ + if !dep.is_locked() { + // If the version isn't exact, we may need to update the registry and look for a newer + // version - we can't know if the package is installed without doing so. + return Ok(None); + } + // Try getting the package from the registry without updating it, to avoid a potentially + // expensive network call in the case that the package is already installed. + // If this fails, the caller will possibly do an index update and try again, this is just a + // best-effort check to see if we can avoid hitting the network. + if let Ok(pkg) = select_dep_pkg(source, dep, config, false) { + let (_ws, rustc, target) = + make_ws_rustc_target(config, opts, &source.source_id(), pkg.clone())?; + if let Ok(true) = is_installed(&pkg, config, opts, &rustc, &target, root, dst, force) { + return Ok(Some(pkg)); + } + } + Ok(None) +} + +fn make_ws_rustc_target<'cfg>( + config: &'cfg Config, + opts: &ops::CompileOptions, + source_id: &SourceId, + pkg: Package, +) -> CargoResult<(Workspace<'cfg>, Rustc, String)> { + let mut ws = if source_id.is_git() || source_id.is_path() { + Workspace::new(pkg.manifest_path(), config)? + } else { + Workspace::ephemeral(pkg, config, None, false)? + }; + ws.set_ignore_lock(config.lock_update_allowed()); + ws.set_require_optional_deps(false); + + let rustc = config.load_global_rustc(Some(&ws))?; + let target = match &opts.build_config.single_requested_kind()? { + CompileKind::Host => rustc.host.as_str().to_owned(), + CompileKind::Target(target) => target.short_name().to_owned(), + }; + + Ok((ws, rustc, target)) +} + +/// Parses x.y.z as if it were =x.y.z, and gives CLI-specific error messages in the case of invalid +/// values. 
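A standalone sketch of the `--vers` handling that `parse_semver_flag` (added below) performs: a bare `x.y.z` is treated as the exact requirement `=x.y.z`, while anything starting with an operator or containing `*` is parsed as a range. It assumes only the `semver` crate's `Version::parse` and `VersionReq::parse`:

    use semver::{Version, VersionReq};

    fn parse_semver_flag(v: &str) -> Result<VersionReq, String> {
        let first = v.chars().next().ok_or("no version provided")?;
        let is_req = "<>=^~".contains(first) || v.contains('*');
        if is_req {
            VersionReq::parse(v)
                .map_err(|e| format!("invalid requirement `{}`: {}", v, e))
        } else {
            let version = Version::parse(v)
                .map_err(|e| format!("invalid version `{}`: {}", v, e))?;
            // Bare versions become an exact requirement.
            VersionReq::parse(&format!("={}", version)).map_err(|e| e.to_string())
        }
    }

    fn main() {
        assert!(parse_semver_flag("1.2.3").is_ok());
        assert!(parse_semver_flag("^1.2").is_ok());
        assert!(parse_semver_flag("").is_err());
    }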
+fn parse_semver_flag(v: &str) -> CargoResult { + // If the version begins with character <, >, =, ^, ~ parse it as a + // version range, otherwise parse it as a specific version + let first = v + .chars() + .next() + .ok_or_else(|| format_err!("no version provided for the `--vers` flag"))?; + + let is_req = "<>=^~".contains(first) || v.contains('*'); + if is_req { + match v.parse::() { + Ok(v) => Ok(v), + Err(_) => bail!( + "the `--vers` provided, `{}`, is \ + not a valid semver version requirement\n\n\ + Please have a look at \ + https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html \ + for the correct format", + v + ), + } + } else { + match v.to_semver() { + Ok(v) => Ok(VersionReq::exact(&v)), + Err(e) => { + let mut msg = format!( + "the `--vers` provided, `{}`, is \ + not a valid semver version: {}\n", + v, e + ); + + // If it is not a valid version but it is a valid version + // requirement, add a note to the warning + if v.parse::().is_ok() { + msg.push_str(&format!( + "\nif you want to specify semver range, \ + add an explicit qualifier, like ^{}", + v + )); + } + bail!(msg); + } + } } } @@ -491,30 +653,17 @@ if ws.ignore_lock() || !ws.root().join("Cargo.lock").exists() { return Ok(()); } - let specs = vec![PackageIdSpec::from_package_id(ws.current()?.package_id())]; - // CompileKind here doesn't really matter, it's only needed for features. - let target_data = RustcTargetData::new(ws, CompileKind::Host)?; // It would be best if `source` could be passed in here to avoid a // duplicate "Updating", but since `source` is taken by value, then it // wouldn't be available for `compile_ws`. - // TODO: It would be easier to use resolve_ws, but it does not honor - // require_optional_deps to avoid writing the lock file. It might be good - // to try to fix that. - let ws_resolve = ops::resolve_ws_with_opts( - ws, - &target_data, - CompileKind::Host, - &ResolveOpts::everything(), - &specs, - HasDevUnits::No, - )?; - let mut sources = ws_resolve.pkg_set.sources_mut(); + let (pkg_set, resolve) = ops::resolve_ws(ws)?; + let mut sources = pkg_set.sources_mut(); // Checking the yanked status involves taking a look at the registry and // maybe updating files, so be sure to lock it here. let _lock = ws.config().acquire_package_cache_lock()?; - for pkg_id in ws_resolve.targeted_resolve.iter() { + for pkg_id in resolve.iter() { if let Some(source) = sources.get_mut(pkg_id.source_id()) { if source.is_yanked(pkg_id)? 
{ ws.config().shell().warn(format!( @@ -535,9 +684,9 @@ let root = resolve_root(dst, config)?; let tracker = InstallTracker::load(config, &root)?; for (k, v) in tracker.all_installed_bins() { - println!("{}:", k); + drop_println!(config, "{}:", k); for bin in v { - println!(" {}", bin); + drop_println!(config, " {}", bin); } } Ok(()) diff -Nru cargo-0.44.1/src/cargo/ops/cargo_new.rs cargo-0.47.0/src/cargo/ops/cargo_new.rs --- cargo-0.44.1/src/cargo/ops/cargo_new.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_new.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,7 @@ -use crate::core::{compiler, Workspace}; +use crate::core::{Shell, Workspace}; use crate::util::errors::{CargoResult, CargoResultExt}; use crate::util::{existing_vcs_repo, FossilRepo, GitRepo, HgRepo, PijulRepo}; -use crate::util::{paths, validate_package_name, Config}; +use crate::util::{paths, restricted_names, Config}; use git2::Config as GitConfig; use git2::Repository as GitRepository; use serde::de; @@ -9,7 +9,6 @@ use std::collections::BTreeMap; use std::env; use std::fmt; -use std::fs; use std::io::{BufRead, BufReader, ErrorKind}; use std::path::{Path, PathBuf}; use std::process::Command; @@ -155,41 +154,71 @@ }) } -fn check_name(name: &str, opts: &NewOptions) -> CargoResult<()> { - // If --name is already used to override, no point in suggesting it - // again as a fix. - let name_help = match opts.name { - Some(_) => "", - None => "\nuse --name to override crate name", - }; +fn check_name(name: &str, name_help: &str, has_bin: bool, shell: &mut Shell) -> CargoResult<()> { + restricted_names::validate_package_name(name, "crate name", name_help)?; - // Ban keywords + test list found at - // https://doc.rust-lang.org/reference/keywords.html - let blacklist = [ - "abstract", "alignof", "as", "become", "box", "break", "const", "continue", "crate", "do", - "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in", "let", "loop", - "macro", "match", "mod", "move", "mut", "offsetof", "override", "priv", "proc", "pub", - "pure", "ref", "return", "self", "sizeof", "static", "struct", "super", "test", "trait", - "true", "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield", - ]; - if blacklist.contains(&name) || (opts.kind.is_bin() && compiler::is_bad_artifact_name(name)) { + if restricted_names::is_keyword(name) { anyhow::bail!( - "The name `{}` cannot be used as a crate name{}", + "the name `{}` cannot be used as a crate name, it is a Rust keyword{}", name, name_help - ) + ); } - - if let Some(ref c) = name.chars().next() { - if c.is_digit(10) { + if restricted_names::is_conflicting_artifact_name(name) { + if has_bin { + anyhow::bail!( + "the name `{}` cannot be used as a crate name, \ + it conflicts with cargo's build directory names{}", + name, + name_help + ); + } else { + shell.warn(format!( + "the name `{}` will not support binary \ + executables with that name, \ + it conflicts with cargo's build directory names", + name + ))?; + } + } + if name == "test" { + anyhow::bail!( + "the name `test` cannot be used as a crate name, \ + it conflicts with Rust's built-in test library{}", + name_help + ); + } + if ["core", "std", "alloc", "proc_macro", "proc-macro"].contains(&name) { + shell.warn(format!( + "the name `{}` is part of Rust's standard library\n\ + It is recommended to use a different name to avoid problems.", + name + ))?; + } + if restricted_names::is_windows_reserved(name) { + if cfg!(windows) { anyhow::bail!( - "Package names starting with a 
digit cannot be used as a crate name{}", + "cannot use name `{}`, it is a reserved Windows filename{}", + name, name_help - ) + ); + } else { + shell.warn(format!( + "the name `{}` is a reserved Windows filename\n\ + This package will not work on Windows platforms.", + name + ))?; } } + if restricted_names::is_non_ascii_name(name) { + shell.warn(format!( + "the name `{}` contains non-ASCII characters\n\ + Support for non-ASCII crate names is experimental and only valid \ + on the nightly toolchain.", + name + ))?; + } - validate_package_name(name, "crate name", name_help)?; Ok(()) } @@ -243,10 +272,7 @@ let pp = i.proposed_path; // path/pp does not exist or is not a file - if !fs::metadata(&path.join(&pp)) - .map(|x| x.is_file()) - .unwrap_or(false) - { + if !path.join(&pp).is_file() { continue; } @@ -328,7 +354,7 @@ pub fn new(opts: &NewOptions, config: &Config) -> CargoResult<()> { let path = &opts.path; - if fs::metadata(path).is_ok() { + if path.exists() { anyhow::bail!( "destination `{}` already exists\n\n\ Use `cargo init` to initialize the directory", @@ -337,7 +363,7 @@ } let name = get_name(path, opts)?; - check_name(name, opts)?; + check_name(name, "", opts.kind.is_bin(), &mut config.shell())?; let mkopts = MkOptions { version_control: opts.version_control, @@ -345,8 +371,8 @@ name, source_files: vec![plan_new_source_file(opts.kind.is_bin(), name.to_string())], bin: opts.kind.is_bin(), - edition: opts.edition.as_ref().map(|s| &**s), - registry: opts.registry.as_ref().map(|s| &**s), + edition: opts.edition.as_deref(), + registry: opts.registry.as_deref(), }; mk(config, &mkopts).chain_err(|| { @@ -367,12 +393,11 @@ let path = &opts.path; - if fs::metadata(&path.join("Cargo.toml")).is_ok() { + if path.join("Cargo.toml").exists() { anyhow::bail!("`cargo init` cannot be run on existing Cargo packages") } let name = get_name(path, opts)?; - check_name(name, opts)?; let mut src_paths_types = vec![]; @@ -385,28 +410,36 @@ // Maybe when doing `cargo init --bin` inside a library package stub, // user may mean "initialize for library, but also add binary target" } + let has_bin = src_paths_types.iter().any(|x| x.bin); + // If --name is already used to override, no point in suggesting it + // again as a fix. 
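An abbreviated illustration of the categories the new `check_name` handles; the real word lists live in cargo's `restricted_names` module, and whether each case is a hard error or only a warning depends on context as shown in the hunk:

    fn is_keyword(name: &str) -> bool {
        // Abbreviated list for the sketch.
        ["as", "fn", "impl", "let", "match", "mod", "use"].contains(&name)
    }

    fn is_windows_reserved(name: &str) -> bool {
        // Abbreviated list for the sketch.
        ["con", "prn", "aux", "nul", "com1", "lpt1"]
            .contains(&name.to_ascii_lowercase().as_str())
    }

    fn main() {
        for &name in ["match", "nul", "my-crate"].iter() {
            if is_keyword(name) {
                println!("error: `{}` is a Rust keyword", name);
            } else if is_windows_reserved(name) {
                println!("warning: `{}` is a reserved Windows filename", name);
            } else {
                println!("`{}` looks usable as a crate name", name);
            }
        }
    }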
+ let name_help = match opts.name { + Some(_) => "", + None => "\nuse --name to override crate name", + }; + check_name(name, name_help, has_bin, &mut config.shell())?; let mut version_control = opts.version_control; if version_control == None { let mut num_detected_vsces = 0; - if fs::metadata(&path.join(".git")).is_ok() { + if path.join(".git").exists() { version_control = Some(VersionControl::Git); num_detected_vsces += 1; } - if fs::metadata(&path.join(".hg")).is_ok() { + if path.join(".hg").exists() { version_control = Some(VersionControl::Hg); num_detected_vsces += 1; } - if fs::metadata(&path.join(".pijul")).is_ok() { + if path.join(".pijul").exists() { version_control = Some(VersionControl::Pijul); num_detected_vsces += 1; } - if fs::metadata(&path.join(".fossil")).is_ok() { + if path.join(".fossil").exists() { version_control = Some(VersionControl::Fossil); num_detected_vsces += 1; } @@ -426,10 +459,10 @@ version_control, path, name, - bin: src_paths_types.iter().any(|x| x.bin), + bin: has_bin, source_files: src_paths_types, - edition: opts.edition.as_ref().map(|s| &**s), - registry: opts.registry.as_ref().map(|s| &**s), + edition: opts.edition.as_deref(), + registry: opts.registry.as_deref(), }; mk(config, &mkopts).chain_err(|| { @@ -528,10 +561,10 @@ VersionControl::NoVcs => return Ok("".to_string()), }; - let ignore: String = match fs::File::open(&fp_ignore) { - Err(why) => match why.kind() { - ErrorKind::NotFound => list.format_new(vcs), - _ => return Err(anyhow::format_err!("{}", why)), + let ignore: String = match paths::open(&fp_ignore) { + Err(err) => match err.downcast_ref::() { + Some(io_err) if io_err.kind() == ErrorKind::NotFound => list.format_new(vcs), + _ => return Err(err), }, Ok(file) => list.format_existing(BufReader::new(file), vcs), }; @@ -706,10 +739,7 @@ " }; - if !fs::metadata(&path_of_source_file) - .map(|x| x.is_file()) - .unwrap_or(false) - { + if !path_of_source_file.is_file() { paths::write(&path_of_source_file, default_file_content)?; // Format the newly created source file @@ -725,12 +755,12 @@ } if let Err(e) = Workspace::new(&path.join("Cargo.toml"), config) { - let msg = format!( + crate::display_warning_with_error( "compiling this new crate may not work due to invalid \ - workspace configuration\n\n{:?}", - e, + workspace configuration", + &e, + &mut config.shell(), ); - config.shell().warn(msg)?; } Ok(()) diff -Nru cargo-0.44.1/src/cargo/ops/cargo_output_metadata.rs cargo-0.47.0/src/cargo/ops/cargo_output_metadata.rs --- cargo-0.44.1/src/cargo/ops/cargo_output_metadata.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_output_metadata.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,8 +1,9 @@ -use crate::core::compiler::{CompileKind, CompileTarget, RustcTargetData}; +use crate::core::compiler::{CompileKind, RustcTargetData}; use crate::core::dependency::DepKind; use crate::core::resolver::{HasDevUnits, Resolve, ResolveOpts}; -use crate::core::{Dependency, InternedString, Package, PackageId, Workspace}; +use crate::core::{Dependency, Package, PackageId, Workspace}; use crate::ops::{self, Packages}; +use crate::util::interning::InternedString; use crate::util::CargoResult; use cargo_platform::Platform; use serde::Serialize; @@ -17,7 +18,7 @@ pub all_features: bool, pub no_deps: bool, pub version: u32, - pub filter_platform: Option, + pub filter_platforms: Vec, } /// Loads the manifest, resolves the dependencies of the package to the concrete @@ -46,6 +47,7 @@ target_directory: ws.target_dir().into_path_unlocked(), version: 
VERSION, workspace_root: ws.root().to_path_buf(), + metadata: ws.custom_metadata().cloned(), }) } @@ -60,6 +62,7 @@ target_directory: PathBuf, version: u32, workspace_root: PathBuf, + metadata: Option, } #[derive(Serialize)] @@ -105,11 +108,9 @@ ) -> CargoResult<(Vec, MetadataResolve)> { // TODO: Without --filter-platform, features are being resolved for `host` only. // How should this work? - let requested_kind = match &metadata_opts.filter_platform { - Some(t) => CompileKind::Target(CompileTarget::new(t)?), - None => CompileKind::Host, - }; - let target_data = RustcTargetData::new(ws, requested_kind)?; + let requested_kinds = + CompileKind::from_requested_targets(ws.config(), &metadata_opts.filter_platforms)?; + let target_data = RustcTargetData::new(ws, &requested_kinds)?; // Resolve entire workspace. let specs = Packages::All.to_package_id_specs(ws)?; let resolve_opts = ResolveOpts::new( @@ -121,10 +122,11 @@ let ws_resolve = ops::resolve_ws_with_opts( ws, &target_data, - requested_kind, + &requested_kinds, &resolve_opts, &specs, HasDevUnits::Yes, + crate::core::resolver::features::ForceAllTargets::No, )?; // Download all Packages. This is needed to serialize the information // for every package. In theory this could honor target filtering, @@ -133,7 +135,8 @@ .pkg_set .get_many(ws_resolve.pkg_set.package_ids())? .into_iter() - .map(|pkg| (pkg.package_id(), pkg.clone())) + // This is a little lazy, but serde doesn't handle Rc fields very well. + .map(|pkg| (pkg.package_id(), Package::clone(pkg))) .collect(); // Start from the workspace roots, and recurse through filling out the @@ -146,7 +149,7 @@ &ws_resolve.targeted_resolve, &package_map, &target_data, - requested_kind, + &requested_kinds, ); } // Get a Vec of Packages. @@ -167,7 +170,7 @@ resolve: &Resolve, package_map: &HashMap, target_data: &RustcTargetData, - requested_kind: CompileKind, + requested_kinds: &[CompileKind], ) { if node_map.contains_key(&pkg_id) { return; @@ -176,19 +179,18 @@ let deps: Vec = resolve .deps(pkg_id) - .filter(|(_dep_id, deps)| match requested_kind { - CompileKind::Target(_) => deps - .iter() - .any(|dep| target_data.dep_platform_activated(dep, requested_kind)), - // No --filter-platform is interpreted as "all platforms". - CompileKind::Host => true, + .filter(|(_dep_id, deps)| { + if requested_kinds == [CompileKind::Host] { + true + } else { + requested_kinds.iter().any(|kind| { + deps.iter() + .any(|dep| target_data.dep_platform_activated(dep, *kind)) + }) + } }) .filter_map(|(dep_id, deps)| { - let mut dep_kinds: Vec<_> = deps.iter().map(DepKindInfo::from).collect(); - // Duplicates may appear if the same package is used by different - // members of a workspace with different features selected. 
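The `sort_unstable` plus `dedup` pair removed in the next hunk is the standard idiom for dropping duplicates; for reference, on plain strings:

    fn main() {
        let mut dep_kinds = vec!["normal", "dev", "normal", "build", "dev"];
        dep_kinds.sort_unstable();
        dep_kinds.dedup(); // dedup only removes adjacent duplicates, hence the sort
        assert_eq!(dep_kinds, ["build", "dev", "normal"]);
    }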
- dep_kinds.sort_unstable(); - dep_kinds.dedup(); + let dep_kinds: Vec<_> = deps.iter().map(DepKindInfo::from).collect(); package_map .get(&dep_id) .and_then(|pkg| pkg.targets().iter().find(|t| t.is_lib())) @@ -216,7 +218,7 @@ resolve, package_map, target_data, - requested_kind, + requested_kinds, ); } } diff -Nru cargo-0.44.1/src/cargo/ops/cargo_package.rs cargo-0.47.0/src/cargo/ops/cargo_package.rs --- cargo-0.44.1/src/cargo/ops/cargo_package.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_package.rs 2020-07-17 20:39:39.000000000 +0000 @@ -13,14 +13,14 @@ use tar::{Archive, Builder, EntryType, Header}; use crate::core::compiler::{BuildConfig, CompileMode, DefaultExecutor, Executor}; -use crate::core::{Feature, Verbosity, Workspace}; +use crate::core::{Feature, Shell, Verbosity, Workspace}; use crate::core::{Package, PackageId, PackageSet, Resolve, Source, SourceId}; -use crate::ops; use crate::sources::PathSource; use crate::util::errors::{CargoResult, CargoResultExt}; use crate::util::paths; use crate::util::toml::TomlManifest; -use crate::util::{self, Config, FileLock}; +use crate::util::{self, restricted_names, Config, FileLock}; +use crate::{drop_println, ops}; pub struct PackageOpts<'cfg> { pub config: &'cfg Config, @@ -29,7 +29,7 @@ pub allow_dirty: bool, pub verify: bool, pub jobs: Option, - pub target: Option, + pub targets: Vec, pub features: Vec, pub all_features: bool, pub no_default_features: bool, @@ -102,7 +102,7 @@ if opts.list { for ar_file in ar_files { - println!("{}", ar_file.rel_str); + drop_println!(config, "{}", ar_file.rel_str); } return Ok(None); } @@ -151,7 +151,7 @@ let root = pkg.root(); for src_file in src_files { let rel_path = src_file.strip_prefix(&root)?.to_path_buf(); - check_filename(&rel_path)?; + check_filename(&rel_path, &mut ws.config().shell())?; let rel_str = rel_path .to_str() .ok_or_else(|| { @@ -274,7 +274,7 @@ orig_pkg .manifest() .original() - .prepare_for_publish(config, orig_pkg.root())?, + .prepare_for_publish(ws, orig_pkg.root())?, ); let package_root = orig_pkg.root(); let source_id = orig_pkg.package_id().source_id(); @@ -501,28 +501,7 @@ config .shell() .verbose(|shell| shell.status("Archiving", &rel_str))?; - // The `tar::Builder` type by default will build GNU archives, but - // unfortunately we force it here to use UStar archives instead. The - // UStar format has more limitations on the length of path name that it - // can encode, so it's not quite as nice to use. - // - // Older cargos, however, had a bug where GNU archives were interpreted - // as UStar archives. This bug means that if we publish a GNU archive - // which has fully filled out metadata it'll be corrupt when unpacked by - // older cargos. - // - // Hopefully in the future after enough cargos have been running around - // with the bugfixed tar-rs library we'll be able to switch this over to - // GNU archives, but for now we'll just say that you can't encode paths - // in archives that are *too* long. - // - // For an instance of this in the wild, use the tar-rs 0.3.3 library to - // unpack the selectors 0.4.0 crate on crates.io. Either that or take a - // look at rust-lang/cargo#2326. 
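The following hunk switches the package archive from UStar to GNU headers and moves path handling into `append_data`. A small sketch with the same `tar` crate, writing a single made-up entry into an in-memory archive:

    use tar::{Builder, Header};

    fn main() -> std::io::Result<()> {
        let mut ar = Builder::new(Vec::new());
        let contents = b"[package]\nname = \"demo\"\n";
        let mut header = Header::new_gnu(); // GNU headers allow long path names
        header.set_size(contents.len() as u64);
        header.set_mode(0o644);
        header.set_cksum();
        // append_data writes the path itself, so long names no longer go
        // through Header::set_path.
        ar.append_data(&mut header, "demo-0.1.0/Cargo.toml", &contents[..])?;
        let _bytes = ar.into_inner()?;
        Ok(())
    }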
- let mut header = Header::new_ustar(); - header - .set_path(&ar_path) - .chain_err(|| format!("failed to add to archive: `{}`", rel_str))?; + let mut header = Header::new_gnu(); match contents { FileContents::OnDisk(disk_path) => { let mut file = File::open(&disk_path).chain_err(|| { @@ -533,13 +512,14 @@ })?; header.set_metadata(&metadata); header.set_cksum(); - ar.append(&header, &mut file).chain_err(|| { - format!("could not archive source file `{}`", disk_path.display()) - })?; + ar.append_data(&mut header, &ar_path, &mut file) + .chain_err(|| { + format!("could not archive source file `{}`", disk_path.display()) + })?; } FileContents::Generated(generated_kind) => { let contents = match generated_kind { - GeneratedFile::Manifest => pkg.to_registry_toml(config)?, + GeneratedFile::Manifest => pkg.to_registry_toml(ws)?, GeneratedFile::Lockfile => build_lock(ws)?, GeneratedFile::VcsInfo(s) => s, }; @@ -553,7 +533,7 @@ ); header.set_size(contents.len() as u64); header.set_cksum(); - ar.append(&header, contents.as_bytes()) + ar.append_data(&mut header, &ar_path, contents.as_bytes()) .chain_err(|| format!("could not archive source file `{}`", rel_str))?; } } @@ -716,8 +696,7 @@ ops::compile_with_exec( &ws, &ops::CompileOptions { - config, - build_config: BuildConfig::new(config, opts.jobs, &opts.target, CompileMode::Build)?, + build_config: BuildConfig::new(config, opts.jobs, &opts.targets, CompileMode::Build)?, features: opts.features.clone(), no_default_features: opts.no_default_features, all_features: opts.all_features, @@ -729,7 +708,6 @@ target_rustc_args: rustc_args, local_rustdoc_args: None, rustdoc_document_private_items: false, - export_dir: None, }, &exec, )?; @@ -816,7 +794,7 @@ // // To help out in situations like this, issue about weird filenames when // packaging as a "heads up" that something may not work on other platforms. 
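A rough standalone check in the spirit of `check_filename` below, flagging file names that contain characters invalid on Windows; the sample paths are made up:

    use std::path::Path;

    fn has_windows_unfriendly_chars(name: &str) -> bool {
        name.chars()
            .any(|c| matches!(c, '<' | '>' | ':' | '"' | '|' | '?' | '*'))
    }

    fn main() {
        for &file in ["src/lib.rs", "notes:draft.txt"].iter() {
            let name = Path::new(file)
                .file_name()
                .and_then(|n| n.to_str())
                .unwrap_or("");
            if has_windows_unfriendly_chars(name) {
                println!("warning: `{}` may not unpack on Windows", file);
            }
        }
    }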
-fn check_filename(file: &Path) -> CargoResult<()> { +fn check_filename(file: &Path, shell: &mut Shell) -> CargoResult<()> { let name = match file.file_name() { Some(name) => name, None => return Ok(()), @@ -837,5 +815,12 @@ file.display() ) } + if restricted_names::is_windows_reserved_path(file) { + shell.warn(format!( + "file {} is a reserved Windows filename, \ + it will not work on Windows platforms", + file.display() + ))?; + } Ok(()) } diff -Nru cargo-0.44.1/src/cargo/ops/cargo_read_manifest.rs cargo-0.47.0/src/cargo/ops/cargo_read_manifest.rs --- cargo-0.44.1/src/cargo/ops/cargo_read_manifest.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_read_manifest.rs 2020-07-17 20:39:39.000000000 +0000 @@ -60,7 +60,7 @@ } // Don't automatically discover packages across git submodules - if fs::metadata(&dir.join(".git")).is_ok() { + if dir.join(".git").exists() { return Ok(false); } } @@ -112,7 +112,7 @@ Err(e) => { let cx = format!("failed to read directory `{}`", path.display()); let e = anyhow::Error::from(e); - return Err(e.context(cx).into()); + return Err(e.context(cx)); } }; for dir in dirs { diff -Nru cargo-0.44.1/src/cargo/ops/cargo_run.rs cargo-0.47.0/src/cargo/ops/cargo_run.rs --- cargo-0.44.1/src/cargo/ops/cargo_run.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_run.rs 2020-07-17 20:39:39.000000000 +0000 @@ -8,7 +8,7 @@ pub fn run( ws: &Workspace<'_>, - options: &ops::CompileOptions<'_>, + options: &ops::CompileOptions, args: &[OsString], ) -> CargoResult> { let config = ws.config(); @@ -70,16 +70,19 @@ } } + // `cargo run` is only compatible with one `--target` flag at most + options.build_config.single_requested_kind()?; + let compile = ops::compile(ws, options)?; assert_eq!(compile.binaries.len(), 1); - let exe = &compile.binaries[0]; + let (unit, exe) = &compile.binaries[0]; let exe = match exe.strip_prefix(config.cwd()) { Ok(path) if path.file_name() == Some(path.as_os_str()) => Path::new(".").join(path), Ok(path) => path.to_path_buf(), Err(_) => exe.to_path_buf(), }; let pkg = bins[0].0; - let mut process = compile.target_process(exe, pkg)?; + let mut process = compile.target_process(exe, unit.kind, pkg)?; process.args(args).cwd(config.cwd()); config.shell().status("Running", process.to_string())?; diff -Nru cargo-0.44.1/src/cargo/ops/cargo_test.rs cargo-0.47.0/src/cargo/ops/cargo_test.rs --- cargo-0.44.1/src/cargo/ops/cargo_test.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_test.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,21 +1,21 @@ use std::ffi::OsString; -use crate::core::compiler::{Compilation, Doctest}; +use crate::core::compiler::{Compilation, CompileKind, Doctest}; use crate::core::shell::Verbosity; use crate::core::Workspace; use crate::ops; use crate::util::errors::CargoResult; -use crate::util::{CargoTestError, ProcessError, Test}; +use crate::util::{CargoTestError, Config, ProcessError, Test}; -pub struct TestOptions<'a> { - pub compile_opts: ops::CompileOptions<'a>, +pub struct TestOptions { + pub compile_opts: ops::CompileOptions, pub no_run: bool, pub no_fail_fast: bool, } pub fn run_tests( ws: &Workspace<'_>, - options: &TestOptions<'_>, + options: &TestOptions, test_args: &[&str], ) -> CargoResult> { let compilation = compile_tests(ws, options)?; @@ -23,14 +23,14 @@ if options.no_run { return Ok(None); } - let (test, mut errors) = run_unit_tests(options, test_args, &compilation)?; + let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?; // 
If we have an error and want to fail fast, then return. if !errors.is_empty() && !options.no_fail_fast { return Ok(Some(CargoTestError::new(test, errors))); } - let (doctest, docerrors) = run_doc_tests(options, test_args, &compilation)?; + let (doctest, docerrors) = run_doc_tests(ws.config(), options, test_args, &compilation)?; let test = if docerrors.is_empty() { test } else { doctest }; errors.extend(docerrors); if errors.is_empty() { @@ -42,7 +42,7 @@ pub fn run_benches( ws: &Workspace<'_>, - options: &TestOptions<'_>, + options: &TestOptions, args: &[&str], ) -> CargoResult> { let compilation = compile_tests(ws, options)?; @@ -54,7 +54,7 @@ let mut args = args.to_vec(); args.push("--bench"); - let (test, errors) = run_unit_tests(options, &args, &compilation)?; + let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?; match errors.len() { 0 => Ok(None), @@ -62,35 +62,28 @@ } } -fn compile_tests<'a>( - ws: &Workspace<'a>, - options: &TestOptions<'a>, -) -> CargoResult> { +fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult> { let mut compilation = ops::compile(ws, &options.compile_opts)?; - compilation - .tests - .sort_by(|a, b| (a.0.package_id(), &a.1, &a.2).cmp(&(b.0.package_id(), &b.1, &b.2))); + compilation.tests.sort(); Ok(compilation) } /// Runs the unit and integration tests of a package. fn run_unit_tests( - options: &TestOptions<'_>, + config: &Config, + options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec)> { - let config = options.compile_opts.config; - let cwd = options.compile_opts.config.cwd(); - + let cwd = config.cwd(); let mut errors = Vec::new(); - for &(ref pkg, ref target, ref exe) in &compilation.tests { - let kind = target.kind(); - let test = target.name().to_string(); + for (unit, exe) in compilation.tests.iter() { + let test = unit.target.name().to_string(); let exe_display = exe.strip_prefix(cwd).unwrap_or(exe).display(); - let mut cmd = compilation.target_process(exe, pkg)?; + let mut cmd = compilation.target_process(exe, unit.kind, &unit.pkg)?; cmd.args(test_args); - if target.harness() && config.shell().verbosity() == Verbosity::Quiet { + if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet { cmd.arg("--quiet"); } config @@ -105,7 +98,12 @@ match result { Err(e) => { let e = e.downcast::()?; - errors.push((kind.clone(), test.clone(), pkg.name().to_string(), e)); + errors.push(( + unit.target.kind().clone(), + test.clone(), + unit.pkg.name().to_string(), + e, + )); if !options.no_fail_fast { break; } @@ -133,52 +131,65 @@ } fn run_doc_tests( - options: &TestOptions<'_>, + config: &Config, + options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec)> { let mut errors = Vec::new(); - let config = options.compile_opts.config; - - // The unstable doctest-xcompile feature enables both per-target-ignores and - // cross-compiling doctests. As a side effect, this feature also gates running - // doctests with runtools when target == host. 
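As an aside on the `is_windows_reserved_path` warning added to `check_filename` above: Windows reserves the legacy device names (`con`, `prn`, `aux`, `nul`, `com1`-`com9`, `lpt1`-`lpt9`), even when they carry an extension such as `nul.rs`. A minimal standalone sketch of that kind of check follows; it is an illustration only, not cargo's actual `restricted_names` implementation.

    use std::path::Path;

    /// Returns true if `name` (without extension) is a reserved Windows device name.
    fn is_windows_reserved(name: &str) -> bool {
        const RESERVED: &[&str] = &[
            "con", "prn", "aux", "nul", "com1", "com2", "com3", "com4", "com5",
            "com6", "com7", "com8", "com9", "lpt1", "lpt2", "lpt3", "lpt4",
            "lpt5", "lpt6", "lpt7", "lpt8", "lpt9",
        ];
        RESERVED.contains(&name.to_ascii_lowercase().as_str())
    }

    /// Returns true if any component of `path` is reserved, ignoring extensions,
    /// so `src/nul.rs` is flagged just like `nul`.
    fn is_windows_reserved_path(path: &Path) -> bool {
        path.iter()
            .filter_map(|component| component.to_str())
            .filter_map(|name| name.split('.').next())
            .any(is_windows_reserved)
    }

    fn main() {
        assert!(is_windows_reserved_path(Path::new("src/nul.rs")));
        assert!(!is_windows_reserved_path(Path::new("src/lib.rs")));
    }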
let doctest_xcompile = config.cli_unstable().doctest_xcompile; - let mut runtool: &Option<(std::path::PathBuf, Vec)> = &None; - if doctest_xcompile { - runtool = compilation.target_runner(); - } else if compilation.host != compilation.target { - return Ok((Test::Doc, errors)); - } for doctest_info in &compilation.to_doc_test { let Doctest { - package, - target, args, unstable_opts, + unit, + linker, } = doctest_info; - config.shell().status("Doc-tests", target.name())?; - let mut p = compilation.rustdoc_process(package, target)?; + + if !doctest_xcompile { + match unit.kind { + CompileKind::Host => {} + CompileKind::Target(target) => { + if target.short_name() != compilation.host { + // Skip doctests, -Zdoctest-xcompile not enabled. + continue; + } + } + } + } + + config.shell().status("Doc-tests", unit.target.name())?; + let mut p = compilation.rustdoc_process(unit)?; p.arg("--test") - .arg(target.src_path().path().unwrap()) + .arg(unit.target.src_path().path().unwrap()) .arg("--crate-name") - .arg(&target.crate_name()); + .arg(&unit.target.crate_name()); if doctest_xcompile { - p.arg("--target").arg(&compilation.target); + if let CompileKind::Target(target) = unit.kind { + // use `rustc_target()` to properly handle JSON target paths + p.arg("--target").arg(target.rustc_target()); + } p.arg("-Zunstable-options"); p.arg("--enable-per-target-ignores"); - } - - if let Some((runtool, runtool_args)) = runtool { - p.arg("--runtool").arg(runtool); - for arg in runtool_args { - p.arg("--runtool-arg").arg(arg); + if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) { + p.arg("--runtool").arg(runtool); + for arg in runtool_args { + p.arg("--runtool-arg").arg(arg); + } + } + if let Some(linker) = linker { + let mut joined = OsString::from("linker="); + joined.push(linker); + p.arg("-C").arg(joined); } } - for &rust_dep in &[&compilation.deps_output] { + for &rust_dep in &[ + &compilation.deps_output[&unit.kind], + &compilation.deps_output[&CompileKind::Host], + ] { let mut arg = OsString::from("dependency="); arg.push(rust_dep); p.arg("-L").arg(arg); @@ -188,17 +199,11 @@ p.arg("-L").arg(native_dep); } - for &host_rust_dep in &[&compilation.host_deps_output] { - let mut arg = OsString::from("dependency="); - arg.push(host_rust_dep); - p.arg("-L").arg(arg); - } - for arg in test_args { p.arg("--test-args").arg(arg); } - if let Some(cfgs) = compilation.cfgs.get(&package.package_id()) { + if let Some(cfgs) = compilation.cfgs.get(&unit.pkg.package_id()) { for cfg in cfgs.iter() { p.arg("--cfg").arg(cfg); } @@ -212,7 +217,7 @@ p.arg("-Zunstable-options"); } - if let Some(flags) = compilation.rustdocflags.get(&package.package_id()) { + if let Some(flags) = compilation.rustdocflags.get(&unit.pkg.package_id()) { p.args(flags); } config diff -Nru cargo-0.44.1/src/cargo/ops/cargo_uninstall.rs cargo-0.47.0/src/cargo/ops/cargo_uninstall.rs --- cargo-0.44.1/src/cargo/ops/cargo_uninstall.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/cargo_uninstall.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,6 +5,7 @@ use crate::core::PackageId; use crate::core::{PackageIdSpec, SourceId}; use crate::ops::common_for_install_and_uninstall::*; +use crate::sources::PathSource; use crate::util::errors::CargoResult; use crate::util::paths; use crate::util::Config; @@ -84,10 +85,13 @@ fn uninstall_cwd(root: &Filesystem, bins: &[String], config: &Config) -> CargoResult<()> { let tracker = InstallTracker::load(config, root)?; let source_id = SourceId::for_path(config.cwd())?; - let src = 
path_source(source_id, config)?; - let pkg = select_pkg(src, None, None, config, true, &mut |path| { - path.read_packages() - })?; + let mut src = path_source(source_id, config)?; + let pkg = select_pkg( + &mut src, + None, + |path: &mut PathSource<'_>| path.read_packages(), + config, + )?; let pkgid = pkg.package_id(); uninstall_pkgid(root, tracker, pkgid, bins, config) } diff -Nru cargo-0.44.1/src/cargo/ops/common_for_install_and_uninstall.rs cargo-0.47.0/src/cargo/ops/common_for_install_and_uninstall.rs --- cargo-0.44.1/src/cargo/ops/common_for_install_and_uninstall.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/common_for_install_and_uninstall.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,7 +5,6 @@ use std::path::{Path, PathBuf}; use anyhow::{bail, format_err}; -use semver::VersionReq; use serde::{Deserialize, Serialize}; use crate::core::compiler::Freshness; @@ -13,7 +12,7 @@ use crate::ops::{self, CompileFilter, CompileOptions}; use crate::sources::PathSource; use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::{Config, ToSemver}; +use crate::util::Config; use crate::util::{FileLock, Filesystem}; /// On-disk tracking for which package installed which binary. @@ -159,7 +158,7 @@ dst: &Path, pkg: &Package, force: bool, - opts: &CompileOptions<'_>, + opts: &CompileOptions, target: &str, _rustc: &str, ) -> CargoResult<(Freshness, BTreeMap>)> { @@ -267,7 +266,7 @@ package: &Package, bins: &BTreeSet, version_req: Option, - opts: &CompileOptions<'_>, + opts: &CompileOptions, target: &str, rustc: &str, ) { @@ -401,7 +400,7 @@ pkg: &Package, bins: &BTreeSet, version_req: Option, - opts: &CompileOptions<'_>, + opts: &CompileOptions, target: &str, rustc: &str, ) { @@ -490,17 +489,12 @@ /// Determine if this installation is "up to date", or if it needs to be reinstalled. /// /// This does not do Package/Source/Version checking. - fn is_up_to_date( - &self, - opts: &CompileOptions<'_>, - target: &str, - exes: &BTreeSet, - ) -> bool { + fn is_up_to_date(&self, opts: &CompileOptions, target: &str, exes: &BTreeSet) -> bool { self.features == feature_set(&opts.features) && self.all_features == opts.all_features && self.no_default_features == opts.no_default_features && self.profile.as_str() == opts.build_config.requested_profile.as_str() - && (self.target.is_none() || self.target.as_ref().map(|t| t.as_ref()) == Some(target)) + && (self.target.is_none() || self.target.as_deref() == Some(target)) && &self.bins == exes } } @@ -526,16 +520,14 @@ } /// Gets a Package based on command-line requirements. 
-pub fn select_pkg<'a, T>( - mut source: T, - name: Option<&str>, - vers: Option<&str>, +pub fn select_dep_pkg( + source: &mut T, + dep: Dependency, config: &Config, needs_update: bool, - list_all: &mut dyn FnMut(&mut T) -> CargoResult>, ) -> CargoResult where - T: Source + 'a, + T: Source, { // This operation may involve updating some sources or making a few queries // which may involve frobbing caches, as a result make sure we synchronize @@ -546,83 +538,42 @@ source.update()?; } - if let Some(name) = name { - let vers = if let Some(v) = vers { - // If the version begins with character <, >, =, ^, ~ parse it as a - // version range, otherwise parse it as a specific version - let first = v - .chars() - .next() - .ok_or_else(|| format_err!("no version provided for the `--vers` flag"))?; - - let is_req = "<>=^~".contains(first) || v.contains('*'); - if is_req { - match v.parse::() { - Ok(v) => Some(v.to_string()), - Err(_) => bail!( - "the `--vers` provided, `{}`, is \ - not a valid semver version requirement\n\n\ - Please have a look at \ - https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html \ - for the correct format", - v - ), - } - } else { - match v.to_semver() { - Ok(v) => Some(format!("={}", v)), - Err(e) => { - let mut msg = format!( - "the `--vers` provided, `{}`, is \ - not a valid semver version: {}\n", - v, e - ); - - // If it is not a valid version but it is a valid version - // requirement, add a note to the warning - if v.parse::().is_ok() { - msg.push_str(&format!( - "\nif you want to specify semver range, \ - add an explicit qualifier, like ^{}", - v - )); - } - bail!(msg); - } - } - } - } else { - None - }; - let vers = vers.as_ref().map(|s| &**s); - let vers_spec = if vers.is_none() && source.source_id().is_registry() { - // Avoid pre-release versions from crate.io - // unless explicitly asked for - Some("*") - } else { - vers - }; - let dep = Dependency::parse_no_deprecated(name, vers_spec, source.source_id())?; - let deps = source.query_vec(&dep)?; - match deps.iter().map(|p| p.package_id()).max() { - Some(pkgid) => { - let pkg = Box::new(&mut source).download_now(pkgid, config)?; - Ok(pkg) - } - None => { - let vers_info = vers - .map(|v| format!(" with version `{}`", v)) - .unwrap_or_default(); - bail!( - "could not find `{}` in {}{}", - name, - source.source_id(), - vers_info - ) - } - } + let deps = source.query_vec(&dep)?; + match deps.iter().map(|p| p.package_id()).max() { + Some(pkgid) => { + let pkg = Box::new(source).download_now(pkgid, config)?; + Ok(pkg) + } + None => bail!( + "could not find `{}` in {} with version `{}`", + dep.package_name(), + source.source_id(), + dep.version_req(), + ), + } +} + +pub fn select_pkg( + source: &mut T, + dep: Option, + mut list_all: F, + config: &Config, +) -> CargoResult +where + T: Source, + F: FnMut(&mut T) -> CargoResult>, +{ + // This operation may involve updating some sources or making a few queries + // which may involve frobbing caches, as a result make sure we synchronize + // with other global Cargos + let _lock = config.acquire_package_cache_lock()?; + + source.update()?; + + return if let Some(dep) = dep { + select_dep_pkg(source, dep, config, false) } else { - let candidates = list_all(&mut source)?; + let candidates = list_all(source)?; let binaries = candidates .iter() .filter(|cand| cand.targets().iter().filter(|t| t.is_bin()).count() > 0); @@ -635,23 +586,23 @@ Some(p) => p, None => bail!( "no packages found with binaries or \ - examples" + examples" ), }, }; - return Ok(pkg.clone()); + 
Ok(pkg.clone()) + }; - fn multi_err(kind: &str, mut pkgs: Vec<&Package>) -> String { - pkgs.sort_unstable_by_key(|a| a.name()); - format!( - "multiple packages with {} found: {}", - kind, - pkgs.iter() - .map(|p| p.name().as_str()) - .collect::>() - .join(", ") - ) - } + fn multi_err(kind: &str, mut pkgs: Vec<&Package>) -> String { + pkgs.sort_unstable_by_key(|a| a.name()); + format!( + "multiple packages with {} found: {}", + kind, + pkgs.iter() + .map(|p| p.name().as_str()) + .collect::>() + .join(", ") + ) } } diff -Nru cargo-0.44.1/src/cargo/ops/fix.rs cargo-0.47.0/src/cargo/ops/fix.rs --- cargo-0.44.1/src/cargo/ops/fix.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/fix.rs 2020-07-17 20:39:39.000000000 +0000 @@ -41,7 +41,6 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use std::env; use std::ffi::OsString; -use std::fs; use std::path::{Path, PathBuf}; use std::process::{self, Command, ExitStatus}; use std::str; @@ -55,7 +54,7 @@ use crate::ops::{self, CompileOptions}; use crate::util::diagnostic_server::{Message, RustfixDiagnosticServer}; use crate::util::errors::CargoResult; -use crate::util::{self, paths}; +use crate::util::{self, paths, Config, ProcessBuilder}; use crate::util::{existing_vcs_repo, LockServer, LockServerClient}; const FIX_ENV: &str = "__CARGO_FIX_PLZ"; @@ -63,22 +62,20 @@ const PREPARE_FOR_ENV: &str = "__CARGO_FIX_PREPARE_FOR"; const EDITION_ENV: &str = "__CARGO_FIX_EDITION"; const IDIOMS_ENV: &str = "__CARGO_FIX_IDIOMS"; -const CLIPPY_FIX_ARGS: &str = "__CARGO_FIX_CLIPPY_ARGS"; pub struct FixOptions<'a> { pub edition: bool, pub prepare_for: Option<&'a str>, pub idioms: bool, - pub compile_opts: CompileOptions<'a>, + pub compile_opts: CompileOptions, pub allow_dirty: bool, pub allow_no_vcs: bool, pub allow_staged: bool, pub broken_code: bool, - pub clippy_args: Option>, } pub fn fix(ws: &Workspace<'_>, opts: &mut FixOptions<'_>) -> CargoResult<()> { - check_version_control(opts)?; + check_version_control(ws.config(), opts)?; // Spin up our lock server, which our subprocesses will use to synchronize fixes. 
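Stepping back to `select_dep_pkg`, introduced above in `common_for_install_and_uninstall.rs`: its core is a single selection rule, namely query the source for everything matching the dependency and take the candidate with the greatest package ID, which for a single name effectively means the highest version. A self-contained sketch of that rule, with a simplified `Candidate` standing in for cargo's package IDs:

    /// Simplified stand-in for a matching package (cargo compares `PackageId`s here).
    #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct Candidate {
        name: &'static str,
        version: (u64, u64, u64),
    }

    /// Pick the newest matching candidate, or `None` if nothing matched
    /// (the `None` case is where cargo reports "could not find ...").
    fn select(mut candidates: Vec<Candidate>) -> Option<Candidate> {
        candidates.sort(); // derived `Ord`: name first, then version
        candidates.pop()
    }

    fn main() {
        let picked = select(vec![
            Candidate { name: "rand", version: (0, 7, 3) },
            Candidate { name: "rand", version: (0, 8, 0) },
        ]);
        assert_eq!(picked.unwrap().version, (0, 8, 0));
    }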
let lock_server = LockServer::new()?; @@ -101,19 +98,6 @@ wrapper.env(IDIOMS_ENV, "1"); } - if opts.clippy_args.is_some() { - if let Err(e) = util::process("clippy-driver").arg("-V").exec_with_output() { - eprintln!("Warning: clippy-driver not found: {:?}", e); - } - - let clippy_args = opts - .clippy_args - .as_ref() - .map_or_else(String::new, |args| serde_json::to_string(&args).unwrap()); - - wrapper.env(CLIPPY_FIX_ARGS, clippy_args); - } - *opts .compile_opts .build_config @@ -130,7 +114,7 @@ server.configure(&mut wrapper); } - let rustc = opts.compile_opts.config.load_global_rustc(Some(ws))?; + let rustc = ws.config().load_global_rustc(Some(ws))?; wrapper.arg(&rustc.path); // primary crates are compiled using a cargo subprocess to do extra work of applying fixes and @@ -141,11 +125,10 @@ Ok(()) } -fn check_version_control(opts: &FixOptions<'_>) -> CargoResult<()> { +fn check_version_control(config: &Config, opts: &FixOptions<'_>) -> CargoResult<()> { if opts.allow_no_vcs { return Ok(()); } - let config = opts.compile_opts.config; if !existing_vcs_repo(config.cwd(), config.cwd()) { anyhow::bail!( "no VCS found for this package and `cargo fix` can potentially \ @@ -222,12 +205,17 @@ let args = FixArgs::get(); trace!("cargo-fix as rustc got file {:?}", args.file); + let rustc = args.rustc.as_ref().expect("fix wrapper rustc was not set"); + let workspace_rustc = std::env::var("RUSTC_WORKSPACE_WRAPPER") + .map(PathBuf::from) + .ok(); + let rustc = util::process(rustc).wrapped(workspace_rustc.as_ref()); let mut fixes = FixedCrate::default(); if let Some(path) = &args.file { trace!("start rustfixing {:?}", path); - fixes = rustfix_crate(&lock_addr, rustc.as_ref(), path, &args)?; + fixes = rustfix_crate(&lock_addr, &rustc, path, &args)?; } // Ok now we have our final goal of testing out the changes that we applied. @@ -239,7 +227,7 @@ // new rustc, and otherwise we capture the output to hide it in the scenario // that we have to back it all out. if !fixes.files.is_empty() { - let mut cmd = Command::new(&rustc); + let mut cmd = rustc.build_command(); args.apply(&mut cmd); cmd.arg("--error-format=json"); let output = cmd.output().context("failed to spawn rustc")?; @@ -267,8 +255,7 @@ if !output.status.success() { if env::var_os(BROKEN_CODE_ENV).is_none() { for (path, file) in fixes.files.iter() { - fs::write(path, &file.original_code) - .with_context(|| format!("failed to write file `{}`", path))?; + paths::write(path, &file.original_code)?; } } log_failed_fix(&output.stderr)?; @@ -279,7 +266,7 @@ // - If the fix failed, show the original warnings and suggestions. // - If `--broken-code`, show the error messages. // - If the fix succeeded, show any remaining warnings. - let mut cmd = Command::new(&rustc); + let mut cmd = rustc.build_command(); args.apply(&mut cmd); for arg in args.format_args { // Add any json/error format arguments that Cargo wants. This allows @@ -302,7 +289,7 @@ fn rustfix_crate( lock_addr: &str, - rustc: &Path, + rustc: &ProcessBuilder, filename: &Path, args: &FixArgs, ) -> Result { @@ -402,7 +389,7 @@ /// and any errors encountered while fixing files. fn rustfix_and_fix( fixes: &mut FixedCrate, - rustc: &Path, + rustc: &ProcessBuilder, filename: &Path, args: &FixArgs, ) -> Result<(), Error> { @@ -410,12 +397,15 @@ // TODO: implement a way to specify this. 
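A note on the `RUSTC_WORKSPACE_WRAPPER` handling added to cargo-fix's rustc shim above: wrapping a command means the wrapper becomes the program that is spawned and the real `rustc` is passed to it as the first argument. The following is a minimal stand-in using `std::process::Command`, assuming that reading of `ProcessBuilder::wrapped` is right; it is not cargo's actual builder.

    use std::process::Command;

    /// If a wrapper is set, run `wrapper rustc ...`; otherwise run `rustc ...` directly.
    fn wrapped_rustc(rustc: &str, wrapper: Option<&str>) -> Command {
        match wrapper {
            Some(w) => {
                let mut cmd = Command::new(w);
                cmd.arg(rustc);
                cmd
            }
            None => Command::new(rustc),
        }
    }

    fn main() {
        let wrapper = std::env::var("RUSTC_WORKSPACE_WRAPPER").ok();
        let cmd = wrapped_rustc("rustc", wrapper.as_deref());
        println!("{:?}", cmd);
    }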
let only = HashSet::new(); - let mut cmd = Command::new(rustc); + let mut cmd = rustc.build_command(); cmd.arg("--error-format=json"); args.apply(&mut cmd); - let output = cmd - .output() - .with_context(|| format!("failed to execute `{}`", rustc.display()))?; + let output = cmd.output().with_context(|| { + format!( + "failed to execute `{}`", + rustc.get_program().to_string_lossy() + ) + })?; // If rustc didn't succeed for whatever reasons then we're very likely to be // looking at otherwise broken code. Let's not make things accidentally @@ -491,7 +481,7 @@ // Attempt to read the source code for this file. If this fails then // that'd be pretty surprising, so log a message and otherwise keep // going. - let code = match paths::read(file.as_ref()) { + let code = match util::paths::read(file.as_ref()) { Ok(s) => s, Err(e) => { warn!("failed to read `{}`: {}", file, e); @@ -525,7 +515,7 @@ } } let new_code = fixed.finish()?; - fs::write(&file, new_code).with_context(|| format!("failed to write file `{}`", file))?; + paths::write(&file, new_code)?; } Ok(()) @@ -534,9 +524,14 @@ fn exit_with(status: ExitStatus) -> ! { #[cfg(unix)] { + use std::io::Write; use std::os::unix::prelude::*; if let Some(signal) = status.signal() { - eprintln!("child failed with signal `{}`", signal); + drop(writeln!( + std::io::stderr().lock(), + "child failed with signal `{}`", + signal + )); process::exit(2); } } @@ -591,7 +586,6 @@ enabled_edition: Option, other: Vec, rustc: Option, - clippy_args: Vec, format_args: Vec, } @@ -611,12 +605,7 @@ fn get() -> FixArgs { let mut ret = FixArgs::default(); - if let Ok(clippy_args) = env::var(CLIPPY_FIX_ARGS) { - ret.clippy_args = serde_json::from_str(&clippy_args).unwrap(); - ret.rustc = Some(util::config::clippy_driver()); - } else { - ret.rustc = env::args_os().nth(1).map(PathBuf::from); - } + ret.rustc = env::args_os().nth(1).map(PathBuf::from); for arg in env::args_os().skip(2) { let path = PathBuf::from(arg); @@ -654,10 +643,6 @@ cmd.arg(path); } - if !self.clippy_args.is_empty() { - cmd.args(&self.clippy_args); - } - cmd.args(&self.other).arg("--cap-lints=warn"); if let Some(edition) = &self.enabled_edition { cmd.arg("--edition").arg(edition); @@ -713,7 +698,7 @@ } fn next_edition(&self) -> &str { - match self.enabled_edition.as_ref().map(|s| &**s) { + match self.enabled_edition.as_deref() { // 2015 -> 2018, None | Some("2015") => "2018", diff -Nru cargo-0.44.1/src/cargo/ops/mod.rs cargo-0.47.0/src/cargo/ops/mod.rs --- cargo-0.44.1/src/cargo/ops/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,5 +1,7 @@ pub use self::cargo_clean::{clean, CleanOptions}; -pub use self::cargo_compile::{compile, compile_with_exec, compile_ws, CompileOptions}; +pub use self::cargo_compile::{ + compile, compile_with_exec, compile_ws, create_bcx, resolve_all_features, CompileOptions, +}; pub use self::cargo_compile::{CompileFilter, FilterRule, LibRule, Packages}; pub use self::cargo_doc::{doc, DocOptions}; pub use self::cargo_fetch::{fetch, FetchOptions}; @@ -46,4 +48,5 @@ mod lockfile; mod registry; mod resolve; +pub mod tree; mod vendor; diff -Nru cargo-0.44.1/src/cargo/ops/registry.rs cargo-0.47.0/src/cargo/ops/registry.rs --- cargo-0.44.1/src/cargo/ops/registry.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/registry.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,5 +1,5 @@ use std::collections::{BTreeMap, HashSet}; -use std::fs::{self, File}; +use std::fs::File; use std::io::{self, BufRead}; 
use std::iter::repeat; use std::str; @@ -7,7 +7,7 @@ use std::{cmp, env}; use anyhow::{bail, format_err}; -use crates_io::{NewCrate, NewCrateDependency, Registry}; +use crates_io::{self, NewCrate, NewCrateDependency, Registry}; use curl::easy::{Easy, InfoType, SslOpt, SslVersion}; use log::{log, Level}; use percent_encoding::{percent_encode, NON_ALPHANUMERIC}; @@ -23,10 +23,15 @@ use crate::util::important_paths::find_root_manifest_for_wd; use crate::util::IntoUrl; use crate::util::{paths, validate_package_name}; -use crate::version; +use crate::{drop_print, drop_println, version}; +/// Registry settings loaded from config files. +/// +/// This is loaded based on the `--registry` flag and the config settings. pub struct RegistryConfig { + /// The index URL. If `None`, use crates.io. pub index: Option, + /// The authentication token. pub token: Option, } @@ -37,7 +42,7 @@ pub verify: bool, pub allow_dirty: bool, pub jobs: Option, - pub target: Option, + pub targets: Vec, pub dry_run: bool, pub registry: Option, pub features: Vec, @@ -83,7 +88,7 @@ list: false, check_metadata: true, allow_dirty: opts.allow_dirty, - target: opts.target.clone(), + targets: opts.targets.clone(), jobs: opts.jobs, features: opts.features.clone(), all_features: opts.all_features, @@ -228,12 +233,15 @@ ref badges, ref links, } = *manifest.metadata(); - let readme_content = match *readme { - Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?), - None => None, - }; + let readme_content = readme + .as_ref() + .map(|readme| { + paths::read(&pkg.root().join(readme)) + .chain_err(|| format!("failed to read `readme` file for package `{}`", pkg)) + }) + .transpose()?; if let Some(ref file) = *license_file { - if fs::metadata(&pkg.root().join(file)).is_err() { + if !pkg.root().join(file).exists() { bail!("the license file `{}` does not exist", file) } } @@ -316,10 +324,15 @@ } } +/// Returns the index and token from the config file for the given registry. +/// +/// `registry` is typically the registry specified on the command-line. If +/// `None`, `index` is set to `None` to indicate it should use crates.io. pub fn registry_configuration( config: &Config, registry: Option, ) -> CargoResult { + // `registry.default` is handled in command-line parsing. let (index, token) = match registry { Some(registry) => { validate_package_name(®istry, "registry name", "")?; @@ -331,19 +344,26 @@ ) } None => { - // Checking for default index and token - ( - config - .get_default_registry_index()? - .map(|url| url.to_string()), - config.get_string("registry.token")?.map(|p| p.val), - ) + // Use crates.io default. + config.check_registry_index_not_set()?; + (None, config.get_string("registry.token")?.map(|p| p.val)) } }; Ok(RegistryConfig { index, token }) } +/// Returns the `Registry` and `Source` based on command-line and config settings. +/// +/// * `token`: The token from the command-line. If not set, uses the token +/// from the config. +/// * `index`: The index URL from the command-line. This is ignored if +/// `registry` is set. +/// * `registry`: The registry name from the command-line. If neither +/// `registry`, or `index` are set, then uses `crates-io`, honoring +/// `[source]` replacement if defined. +/// * `force_update`: If `true`, forces the index to be updated. +/// * `validate_token`: If `true`, the token must be set. 
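The doc comment above enumerates how `--token`, `--index`, `--registry`, and the config token interact. Boiled down to a decision table, those rules look roughly like the sketch below; it is a simplified illustration that omits the source-replacement warning case, not the real `registry()` function, which follows.

    /// Decide which token to use, mirroring the rules described above:
    /// a command-line token always wins; a config token is only used without
    /// `--index`; `--index` without `--token` is an error when a token is required.
    fn choose_token(
        cli_token: Option<String>,
        config_token: Option<String>,
        index: Option<String>,
        validate_token: bool,
    ) -> Result<Option<String>, String> {
        match (index, cli_token, config_token) {
            // Token given on the command-line: always use it.
            (_, Some(t), _) => Ok(Some(t)),
            // No --index: fall back to the token from the config, if any.
            (None, None, Some(t)) => Ok(Some(t)),
            // --index without --token is only an error if a token is actually needed.
            (Some(_), None, _) if validate_token => {
                Err("command-line argument --index requires --token to be specified".into())
            }
            (Some(_), None, _) => Ok(None),
            // Nothing available anywhere.
            (None, None, None) if validate_token => {
                Err("no upload token found, please run `cargo login` or pass `--token`".into())
            }
            (None, None, None) => Ok(None),
        }
    }

    fn main() {
        assert_eq!(
            choose_token(Some(String::from("cli")), None, None, true),
            Ok(Some(String::from("cli")))
        );
        assert!(choose_token(None, None, Some(String::from("https://my-index")), true).is_err());
    }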
fn registry( config: &Config, token: Option, @@ -352,13 +372,17 @@ force_update: bool, validate_token: bool, ) -> CargoResult<(Registry, SourceId)> { + if index.is_some() && registry.is_some() { + // Otherwise we would silently ignore one or the other. + bail!("both `--index` and `--registry` should not be set at the same time"); + } // Parse all configuration options let RegistryConfig { token: token_config, index: index_config, } = registry_configuration(config, registry.clone())?; - let token = token.or(token_config); - let sid = get_source_id(config, index_config.or(index), registry)?; + let opt_index = index_config.as_ref().or_else(|| index.as_ref()); + let sid = get_source_id(config, opt_index, registry.as_ref())?; if !sid.is_remote_registry() { bail!( "{} does not support API commands.\n\ @@ -386,10 +410,51 @@ cfg.and_then(|cfg| cfg.api) .ok_or_else(|| format_err!("{} does not support API commands", sid))? }; - let handle = http_handle(config)?; - if validate_token && token.is_none() { - bail!("no upload token found, please run `cargo login`"); + let token = match (&index, &token, &token_config) { + // No token. + (None, None, None) => { + if validate_token { + bail!("no upload token found, please run `cargo login` or pass `--token`"); + } + None + } + // Token on command-line. + (_, Some(_), _) => token, + // Token in config, no --index, loading from config is OK for crates.io. + (None, None, Some(_)) => { + // Check `is_default_registry` so that the crates.io index can + // change config.json's "api" value, and this won't affect most + // people. It will affect those using source replacement, but + // hopefully that's a relatively small set of users. + if registry.is_none() + && !sid.is_default_registry() + && !crates_io::is_url_crates_io(&api_host) + { + if validate_token { + config.shell().warn( + "using `registry.token` config value with source \ + replacement is deprecated\n\ + This may become a hard error in the future; \ + see .\n\ + Use the --token command-line flag to remove this warning.", + )?; + token_config + } else { + None + } + } else { + token_config + } + } + // --index, no --token + (Some(_), None, _) => { + if validate_token { + bail!("command-line argument --index requires --token to be specified") + } + None + } }; + let handle = http_handle(config)?; Ok((Registry::new_handle(api_host, token, handle), sid)) } @@ -494,7 +559,12 @@ }; match str::from_utf8(data) { Ok(s) => { - for line in s.lines() { + for mut line in s.lines() { + if line.starts_with("Authorization:") { + line = "Authorization: [REDACTED]"; + } else if line[..line.len().min(10)].eq_ignore_ascii_case("set-cookie") { + line = "set-cookie: [REDACTED]"; + } log!(level, "http-debug: {} {}", prefix, line); } } @@ -593,7 +663,8 @@ let token = match token { Some(token) => token, None => { - println!( + drop_println!( + config, "please visit {}/me and paste the API Token below", registry.host() ); @@ -684,11 +755,11 @@ .list_owners(&name) .chain_err(|| format!("failed to list owners of crate {}", name))?; for owner in owners.iter() { - print!("{}", owner.login); + drop_print!(config, "{}", owner.login); match (owner.name.as_ref(), owner.email.as_ref()) { - (Some(name), Some(email)) => println!(" ({} <{}>)", name, email), - (Some(s), None) | (None, Some(s)) => println!(" ({})", s), - (None, None) => println!(), + (Some(name), Some(email)) => drop_println!(config, " ({} <{}>)", name, email), + (Some(s), None) | (None, Some(s)) => drop_println!(config, " ({})", s), + (None, None) => drop_println!(config), } 
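The `http-debug` change in the hunk above keeps credentials out of debug logs: any line starting with `Authorization:`, and any line starting with `set-cookie` (matched case-insensitively), is replaced wholesale before logging. The same logic as a tiny standalone function, for illustration:

    /// Redact sensitive HTTP headers before they are logged.
    fn redact(line: &str) -> &str {
        if line.starts_with("Authorization:") {
            "Authorization: [REDACTED]"
        } else if line[..line.len().min(10)].eq_ignore_ascii_case("set-cookie") {
            "set-cookie: [REDACTED]"
        } else {
            line
        }
    }

    fn main() {
        assert_eq!(redact("Authorization: token abc123"), "Authorization: [REDACTED]");
        assert_eq!(redact("Set-Cookie: session=xyz"), "set-cookie: [REDACTED]");
        assert_eq!(redact("content-length: 42"), "content-length: 42");
    }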
} } @@ -739,13 +810,17 @@ Ok(()) } +/// Gets the SourceId for an index or registry setting. +/// +/// The `index` and `reg` values are from the command-line or config settings. +/// If both are None, returns the source for crates.io. fn get_source_id( config: &Config, - index: Option, - reg: Option, + index: Option<&String>, + reg: Option<&String>, ) -> CargoResult { match (reg, index) { - (Some(r), _) => SourceId::alt_registry(config, &r), + (Some(r), _) => SourceId::alt_registry(config, r), (_, Some(i)) => SourceId::for_registry(&i.into_url()?), _ => { let map = SourceConfigMap::new(config)?; @@ -805,12 +880,13 @@ } None => name, }; - println!("{}", line); + drop_println!(config, "{}", line); } let search_max_limit = 100; if total_crates > limit && limit < search_max_limit { - println!( + drop_println!( + config, "... and {} crates more (use --limit N to see more)", total_crates - limit ); @@ -823,7 +899,12 @@ } else { String::new() }; - println!("... and {} crates more{}", total_crates - limit, extra); + drop_println!( + config, + "... and {} crates more{}", + total_crates - limit, + extra + ); } Ok(()) diff -Nru cargo-0.44.1/src/cargo/ops/resolve.rs cargo-0.47.0/src/cargo/ops/resolve.rs --- cargo-0.44.1/src/cargo/ops/resolve.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/resolve.rs 2020-07-17 20:39:39.000000000 +0000 @@ -12,7 +12,7 @@ use crate::core::compiler::{CompileKind, RustcTargetData}; use crate::core::registry::PackageRegistry; -use crate::core::resolver::features::{FeatureResolver, ResolvedFeatures}; +use crate::core::resolver::features::{FeatureResolver, ForceAllTargets, ResolvedFeatures}; use crate::core::resolver::{self, HasDevUnits, Resolve, ResolveOpts}; use crate::core::summary::Summary; use crate::core::Feature; @@ -20,14 +20,14 @@ use crate::ops; use crate::sources::PathSource; use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::profile; +use crate::util::{profile, CanonicalUrl}; use log::{debug, trace}; use std::collections::HashSet; /// Result for `resolve_ws_with_opts`. -pub struct WorkspaceResolve<'a> { +pub struct WorkspaceResolve<'cfg> { /// Packages to be downloaded. - pub pkg_set: PackageSet<'a>, + pub pkg_set: PackageSet<'cfg>, /// The resolve for the entire workspace. /// /// This may be `None` for things like `cargo install` and `-Zavoid-dev-deps`. @@ -72,14 +72,15 @@ /// /// `specs` may be empty, which indicates it should resolve all workspace /// members. In this case, `opts.all_features` must be `true`. -pub fn resolve_ws_with_opts<'a>( - ws: &Workspace<'a>, +pub fn resolve_ws_with_opts<'cfg>( + ws: &Workspace<'cfg>, target_data: &RustcTargetData, - requested_target: CompileKind, + requested_targets: &[CompileKind], opts: &ResolveOpts, specs: &[PackageIdSpec], has_dev_units: HasDevUnits, -) -> CargoResult> { + force_all_targets: ForceAllTargets, +) -> CargoResult> { let mut registry = PackageRegistry::new(ws.config())?; let mut add_patches = true; let resolve = if ws.ignore_lock() { @@ -126,14 +127,29 @@ let pkg_set = get_resolved_packages(&resolved_with_overrides, registry)?; + let member_ids = ws + .members_with_features(specs, &opts.features)? 
+ .into_iter() + .map(|(p, _fts)| p.package_id()) + .collect::>(); + pkg_set.download_accessible( + &resolved_with_overrides, + &member_ids, + has_dev_units, + requested_targets, + target_data, + )?; + let resolved_features = FeatureResolver::resolve( ws, target_data, &resolved_with_overrides, + &pkg_set, &opts.features, specs, - requested_target, + requested_targets, has_dev_units, + force_all_targets, )?; Ok(WorkspaceResolve { @@ -159,7 +175,7 @@ true, )?; - if !ws.is_ephemeral() { + if !ws.is_ephemeral() && ws.require_optional_deps() { ops::write_pkg_lockfile(ws, &resolve)?; } Ok(resolve) @@ -200,17 +216,16 @@ // // TODO: this seems like a hokey reason to single out the registry as being // different. - let mut to_avoid_sources: HashSet = HashSet::new(); - if let Some(to_avoid) = to_avoid { - to_avoid_sources.extend( - to_avoid - .iter() + let to_avoid_sources: HashSet = to_avoid + .map(|set| { + set.iter() .map(|p| p.source_id()) - .filter(|s| !s.is_registry()), - ); - } + .filter(|s| !s.is_registry()) + .collect() + }) + .unwrap_or_default(); - let keep = |p: &PackageId| { + let pre_patch_keep = |p: &PackageId| { !to_avoid_sources.contains(&p.source_id()) && match to_avoid { Some(set) => !set.contains(p), @@ -218,28 +233,18 @@ } }; - // In the case where a previous instance of resolve is available, we - // want to lock as many packages as possible to the previous version - // without disturbing the graph structure. - let mut try_to_use = HashSet::new(); - if let Some(r) = previous { - trace!("previous: {:?}", r); - register_previous_locks(ws, registry, r, &keep); - - // Everything in the previous lock file we want to keep is prioritized - // in dependency selection if it comes up, aka we want to have - // conservative updates. - try_to_use.extend(r.iter().filter(keep).inspect(|id| { - debug!("attempting to prefer {}", id); - })); - } - + // This is a set of PackageIds of `[patch]` entries that should not be + // locked. + let mut avoid_patch_ids = HashSet::new(); if register_patches { for (url, patches) in ws.root_patch() { let previous = match previous { Some(r) => r, None => { - registry.patch(url, patches)?; + let patches: Vec<_> = patches.iter().map(|p| (p, None)).collect(); + let unlock_ids = registry.patch(url, &patches)?; + // Since nothing is locked, this shouldn't possibly return anything. + assert!(unlock_ids.is_empty()); continue; } }; @@ -248,19 +253,57 @@ .map(|dep| { let unused = previous.unused_patches().iter().cloned(); let candidates = previous.iter().chain(unused); - match candidates.filter(keep).find(|&id| dep.matches_id(id)) { + match candidates + .filter(pre_patch_keep) + .find(|&id| dep.matches_id(id)) + { Some(id) => { - let mut dep = dep.clone(); - dep.lock_to(id); - dep + let mut locked_dep = dep.clone(); + locked_dep.lock_to(id); + (dep, Some((locked_dep, id))) } - None => dep.clone(), + None => (dep, None), } }) .collect::>(); - registry.patch(url, &patches)?; + let canonical = CanonicalUrl::new(url)?; + for (orig_patch, unlock_id) in registry.patch(url, &patches)? { + // Avoid the locked patch ID. + avoid_patch_ids.insert(unlock_id); + // Also avoid the thing it is patching. 
+ avoid_patch_ids.extend(previous.iter().filter(|id| { + orig_patch.matches_ignoring_source(*id) + && *id.source_id().canonical_url() == canonical + })); + } } + } + debug!("avoid_patch_ids={:?}", avoid_patch_ids); + let keep = |p: &PackageId| pre_patch_keep(p) && !avoid_patch_ids.contains(p); + + // In the case where a previous instance of resolve is available, we + // want to lock as many packages as possible to the previous version + // without disturbing the graph structure. + if let Some(r) = previous { + trace!("previous: {:?}", r); + register_previous_locks(ws, registry, r, &keep); + } + // Everything in the previous lock file we want to keep is prioritized + // in dependency selection if it comes up, aka we want to have + // conservative updates. + let try_to_use = previous + .map(|r| { + r.iter() + .filter(keep) + .inspect(|id| { + debug!("attempting to prefer {}", id); + }) + .collect() + }) + .unwrap_or_default(); + + if register_patches { registry.lock_patches(); } @@ -371,10 +414,10 @@ Ok(()) } -pub fn get_resolved_packages<'a>( +pub fn get_resolved_packages<'cfg>( resolve: &Resolve, - registry: PackageRegistry<'a>, -) -> CargoResult> { + registry: PackageRegistry<'cfg>, +) -> CargoResult> { let ids: Vec = resolve.iter().collect(); registry.get(&ids) } @@ -545,6 +588,7 @@ // the registry as a locked dependency. let keep = |id: &PackageId| keep(id) && !avoid_locking.contains(id); + registry.clear_lock(); for node in resolve.iter().filter(keep) { let deps = resolve .deps_not_replaced(node) diff -Nru cargo-0.44.1/src/cargo/ops/tree/format/mod.rs cargo-0.47.0/src/cargo/ops/tree/format/mod.rs --- cargo-0.44.1/src/cargo/ops/tree/format/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/tree/format/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,110 @@ +use self::parse::{Parser, RawChunk}; +use super::{Graph, Node}; +use anyhow::{bail, Error}; +use std::fmt; + +mod parse; + +enum Chunk { + Raw(String), + Package, + License, + Repository, + Features, +} + +pub struct Pattern(Vec); + +impl Pattern { + pub fn new(format: &str) -> Result { + let mut chunks = vec![]; + + for raw in Parser::new(format) { + let chunk = match raw { + RawChunk::Text(text) => Chunk::Raw(text.to_owned()), + RawChunk::Argument("p") => Chunk::Package, + RawChunk::Argument("l") => Chunk::License, + RawChunk::Argument("r") => Chunk::Repository, + RawChunk::Argument("f") => Chunk::Features, + RawChunk::Argument(a) => { + bail!("unsupported pattern `{}`", a); + } + RawChunk::Error(err) => bail!("{}", err), + }; + chunks.push(chunk); + } + + Ok(Pattern(chunks)) + } + + pub fn display<'a>(&'a self, graph: &'a Graph<'a>, node_index: usize) -> Display<'a> { + Display { + pattern: self, + graph, + node_index, + } + } +} + +pub struct Display<'a> { + pattern: &'a Pattern, + graph: &'a Graph<'a>, + node_index: usize, +} + +impl<'a> fmt::Display for Display<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let node = self.graph.node(self.node_index); + match node { + Node::Package { + package_id, + features, + .. 
+ } => { + let package = self.graph.package_for_id(*package_id); + for chunk in &self.pattern.0 { + match chunk { + Chunk::Raw(s) => fmt.write_str(s)?, + Chunk::Package => { + write!(fmt, "{} v{}", package.name(), package.version())?; + + let source_id = package.package_id().source_id(); + if !source_id.is_default_registry() { + write!(fmt, " ({})", source_id)?; + } + } + Chunk::License => { + if let Some(license) = &package.manifest().metadata().license { + write!(fmt, "{}", license)?; + } + } + Chunk::Repository => { + if let Some(repository) = &package.manifest().metadata().repository { + write!(fmt, "{}", repository)?; + } + } + Chunk::Features => { + write!(fmt, "{}", features.join(","))?; + } + } + } + } + Node::Feature { name, node_index } => { + let for_node = self.graph.node(*node_index); + match for_node { + Node::Package { package_id, .. } => { + write!(fmt, "{} feature \"{}\"", package_id.name(), name)?; + if self.graph.is_cli_feature(self.node_index) { + write!(fmt, " (command-line)")?; + } + } + // The node_index in Node::Feature must point to a package + // node, see `add_feature`. + _ => panic!("unexpected feature node {:?}", for_node), + } + } + } + + Ok(()) + } +} diff -Nru cargo-0.44.1/src/cargo/ops/tree/format/parse.rs cargo-0.47.0/src/cargo/ops/tree/format/parse.rs --- cargo-0.44.1/src/cargo/ops/tree/format/parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/tree/format/parse.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,123 @@ +//! Parser for the `--format` string for `cargo tree`. + +use std::iter; +use std::str; + +pub enum RawChunk<'a> { + /// Raw text to include in the output. + Text(&'a str), + /// A substitution to place in the output. For example, the argument "p" + /// emits the package name. + Argument(&'a str), + /// Indicates an error in the format string. The given string is a + /// human-readable message explaining the error. + Error(&'static str), +} + +/// `cargo tree` format parser. +/// +/// The format string indicates how each package should be displayed. It +/// includes simple markers surrounded in curly braces that will be +/// substituted with their corresponding values. For example, the text +/// "{p} license:{l}" will substitute the `{p}` with the package name/version +/// (and optionally source), and the `{l}` will be the license from +/// `Cargo.toml`. +/// +/// Substitutions are alphabetic characters between curly braces, like `{p}` +/// or `{foo}`. The actual interpretation of these are done in the `Pattern` +/// struct. +/// +/// Bare curly braces can be included in the output with double braces like +/// `{{` will include a single `{`, similar to Rust's format strings. 
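To make the grammar described above concrete: a format string is parsed into alternating text and argument chunks, with `{{` and `}}` collapsing to literal braces. A hypothetical unit test, assuming it sits inside the `parse` module so `Parser` and `RawChunk` are in scope, would look like this:

    #[test]
    fn chunk_stream_example() {
        // "{p} {{{l}}}" yields: Argument("p"), Text(" "), Text("{"), Argument("l"), Text("}")
        let chunks: Vec<_> = Parser::new("{p} {{{l}}}").collect();
        assert!(matches!(chunks[0], RawChunk::Argument("p")));
        assert!(matches!(chunks[1], RawChunk::Text(" ")));
        assert!(matches!(chunks[2], RawChunk::Text("{")));
        assert!(matches!(chunks[3], RawChunk::Argument("l")));
        assert!(matches!(chunks[4], RawChunk::Text("}")));
    }

An unterminated argument such as `{p` instead ends the stream with `RawChunk::Error("expected '}'")`, which `Pattern::new` turns into a user-facing error.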
+pub struct Parser<'a> { + s: &'a str, + it: iter::Peekable>, +} + +impl<'a> Parser<'a> { + pub fn new(s: &'a str) -> Parser<'a> { + Parser { + s, + it: s.char_indices().peekable(), + } + } + + fn consume(&mut self, ch: char) -> bool { + match self.it.peek() { + Some(&(_, c)) if c == ch => { + self.it.next(); + true + } + _ => false, + } + } + + fn argument(&mut self) -> RawChunk<'a> { + RawChunk::Argument(self.name()) + } + + fn name(&mut self) -> &'a str { + let start = match self.it.peek() { + Some(&(pos, ch)) if ch.is_alphabetic() => { + self.it.next(); + pos + } + _ => return "", + }; + + loop { + match self.it.peek() { + Some(&(_, ch)) if ch.is_alphanumeric() => { + self.it.next(); + } + Some(&(end, _)) => return &self.s[start..end], + None => return &self.s[start..], + } + } + } + + fn text(&mut self, start: usize) -> RawChunk<'a> { + while let Some(&(pos, ch)) = self.it.peek() { + match ch { + '{' | '}' => return RawChunk::Text(&self.s[start..pos]), + _ => { + self.it.next(); + } + } + } + RawChunk::Text(&self.s[start..]) + } +} + +impl<'a> Iterator for Parser<'a> { + type Item = RawChunk<'a>; + + fn next(&mut self) -> Option> { + match self.it.peek() { + Some(&(_, '{')) => { + self.it.next(); + if self.consume('{') { + Some(RawChunk::Text("{")) + } else { + let chunk = self.argument(); + if self.consume('}') { + Some(chunk) + } else { + for _ in &mut self.it {} + Some(RawChunk::Error("expected '}'")) + } + } + } + Some(&(_, '}')) => { + self.it.next(); + if self.consume('}') { + Some(RawChunk::Text("}")) + } else { + Some(RawChunk::Error("unexpected '}'")) + } + } + Some(&(i, _)) => Some(self.text(i)), + None => None, + } + } +} diff -Nru cargo-0.44.1/src/cargo/ops/tree/graph.rs cargo-0.47.0/src/cargo/ops/tree/graph.rs --- cargo-0.44.1/src/cargo/ops/tree/graph.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/tree/graph.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,596 @@ +//! Code for building the graph used by `cargo tree`. + +use super::TreeOptions; +use crate::core::compiler::{CompileKind, RustcTargetData}; +use crate::core::dependency::DepKind; +use crate::core::resolver::features::{FeaturesFor, RequestedFeatures, ResolvedFeatures}; +use crate::core::resolver::Resolve; +use crate::core::{FeatureMap, FeatureValue, Package, PackageId, PackageIdSpec, Workspace}; +use crate::util::interning::InternedString; +use crate::util::CargoResult; +use std::collections::{HashMap, HashSet}; + +#[derive(Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub enum Node { + Package { + package_id: PackageId, + /// Features that are enabled on this package. + features: Vec, + kind: CompileKind, + }, + Feature { + /// Index of the package node this feature is for. + node_index: usize, + /// Name of the feature. + name: InternedString, + }, +} + +/// The kind of edge, for separating dependencies into different sections. +#[derive(Debug, Copy, Hash, Eq, Clone, PartialEq)] +pub enum EdgeKind { + Dep(DepKind), + Feature, +} + +/// Set of outgoing edges for a single node. +/// +/// Edges are separated by the edge kind (`DepKind` or `Feature`). This is +/// primarily done so that the output can easily display separate sections +/// like `[build-dependencies]`. +/// +/// The value is a `Vec` because each edge kind can have multiple outgoing +/// edges. For example, package "foo" can have multiple normal dependencies. +#[derive(Clone)] +struct Edges(HashMap>); + +impl Edges { + fn new() -> Edges { + Edges(HashMap::new()) + } + + /// Adds an edge pointing to the given node. 
+ fn add_edge(&mut self, kind: EdgeKind, index: usize) { + let indexes = self.0.entry(kind).or_default(); + if !indexes.contains(&index) { + indexes.push(index) + } + } +} + +/// A graph of dependencies. +pub struct Graph<'a> { + nodes: Vec, + /// The indexes of `edges` correspond to the `nodes`. That is, `edges[0]` + /// is the set of outgoing edges for `nodes[0]`. They should always be in + /// sync. + edges: Vec, + /// Index maps a node to an index, for fast lookup. + index: HashMap, + /// Map for looking up packages. + package_map: HashMap, + /// Set of indexes of feature nodes that were added via the command-line. + /// + /// For example `--features foo` will mark the "foo" node here. + cli_features: HashSet, + /// Map of dependency names, used for building internal feature map for + /// dep_name/feat_name syntax. + /// + /// Key is the index of a package node, value is a map of dep_name to a + /// set of `(pkg_node_index, is_optional)`. + dep_name_map: HashMap>>, +} + +impl<'a> Graph<'a> { + fn new(package_map: HashMap) -> Graph<'a> { + Graph { + nodes: Vec::new(), + edges: Vec::new(), + index: HashMap::new(), + package_map, + cli_features: HashSet::new(), + dep_name_map: HashMap::new(), + } + } + + /// Adds a new node to the graph, returning its new index. + fn add_node(&mut self, node: Node) -> usize { + let from_index = self.nodes.len(); + self.nodes.push(node); + self.edges.push(Edges::new()); + self.index + .insert(self.nodes[from_index].clone(), from_index); + from_index + } + + /// Returns a list of nodes the given node index points to for the given kind. + pub fn connected_nodes(&self, from: usize, kind: &EdgeKind) -> Vec { + match self.edges[from].0.get(kind) { + Some(indexes) => { + // Created a sorted list for consistent output. + let mut indexes = indexes.clone(); + indexes.sort_unstable_by(|a, b| self.nodes[*a].cmp(&self.nodes[*b])); + indexes + } + None => Vec::new(), + } + } + + /// Returns `true` if the given node has any outgoing edges. + pub fn has_outgoing_edges(&self, index: usize) -> bool { + !self.edges[index].0.is_empty() + } + + /// Gets a node by index. + pub fn node(&self, index: usize) -> &Node { + &self.nodes[index] + } + + /// Given a slice of PackageIds, returns the indexes of all nodes that match. + pub fn indexes_from_ids(&self, package_ids: &[PackageId]) -> Vec { + let mut result: Vec<(&Node, usize)> = self + .nodes + .iter() + .enumerate() + .filter(|(_i, node)| match node { + Node::Package { package_id, .. } => package_ids.contains(package_id), + _ => false, + }) + .map(|(i, node)| (node, i)) + .collect(); + // Sort for consistent output (the same command should always return + // the same output). "unstable" since nodes should always be unique. + result.sort_unstable(); + result.into_iter().map(|(_node, i)| i).collect() + } + + pub fn package_for_id(&self, id: PackageId) -> &Package { + self.package_map[&id] + } + + fn package_id_for_index(&self, index: usize) -> PackageId { + match self.nodes[index] { + Node::Package { package_id, .. } => package_id, + Node::Feature { .. } => panic!("unexpected feature node"), + } + } + + /// Returns `true` if the given feature node index is a feature enabled + /// via the command-line. + pub fn is_cli_feature(&self, index: usize) -> bool { + self.cli_features.contains(&index) + } + + /// Returns a new graph by removing all nodes not reachable from the + /// given nodes. + pub fn from_reachable(&self, roots: &[usize]) -> Graph<'a> { + // Graph built with features does not (yet) support --duplicates. 
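Two `Graph` helpers are easy to miss in the diff noise: `from_reachable`, begun just above, rebuilds the graph from a set of roots (used for pruning), and `invert`, a little further below, reverses every edge (used for `--invert`, i.e. "who depends on this?"). Ignoring edge kinds, the inversion amounts to rebuilding the adjacency lists, as in this standalone sketch:

    /// Reverse every edge of a graph given as plain adjacency lists
    /// (cargo's real `Graph::invert` does the same per `EdgeKind`).
    fn invert(edges: &[Vec<usize>]) -> Vec<Vec<usize>> {
        let mut new_edges = vec![Vec::new(); edges.len()];
        for (from, outgoing) in edges.iter().enumerate() {
            for &to in outgoing {
                new_edges[to].push(from);
            }
        }
        new_edges
    }

    fn main() {
        // 0 -> 1, 0 -> 2, 1 -> 2   becomes   1 -> 0, 2 -> 0, 2 -> 1
        let inverted = invert(&[vec![1, 2], vec![2], vec![]]);
        let expected: Vec<Vec<usize>> = vec![vec![], vec![0], vec![0, 1]];
        assert_eq!(inverted, expected);
    }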
+ assert!(self.dep_name_map.is_empty()); + let mut new_graph = Graph::new(self.package_map.clone()); + // Maps old index to new index. None if not yet visited. + let mut remap: Vec> = vec![None; self.nodes.len()]; + + fn visit( + graph: &Graph<'_>, + new_graph: &mut Graph<'_>, + remap: &mut Vec>, + index: usize, + ) -> usize { + if let Some(new_index) = remap[index] { + // Already visited. + return new_index; + } + let node = graph.node(index).clone(); + let new_from = new_graph.add_node(node); + remap[index] = Some(new_from); + // Visit dependencies. + for (edge_kind, edge_indexes) in &graph.edges[index].0 { + for edge_index in edge_indexes { + let new_to_index = visit(graph, new_graph, remap, *edge_index); + new_graph.edges[new_from].add_edge(*edge_kind, new_to_index); + } + } + new_from + } + + // Walk the roots, generating a new graph as it goes along. + for root in roots { + visit(self, &mut new_graph, &mut remap, *root); + } + + new_graph + } + + /// Inverts the direction of all edges. + pub fn invert(&mut self) { + let mut new_edges = vec![Edges::new(); self.edges.len()]; + for (from_idx, node_edges) in self.edges.iter().enumerate() { + for (kind, edges) in &node_edges.0 { + for edge_idx in edges { + new_edges[*edge_idx].add_edge(*kind, from_idx); + } + } + } + self.edges = new_edges; + } + + /// Returns a list of nodes that are considered "duplicates" (same package + /// name, with different versions/features/source/etc.). + pub fn find_duplicates(&self) -> Vec { + // Graph built with features does not (yet) support --duplicates. + assert!(self.dep_name_map.is_empty()); + + // Collect a map of package name to Vec<(&Node, usize)>. + let mut packages = HashMap::new(); + for (i, node) in self.nodes.iter().enumerate() { + if let Node::Package { package_id, .. } = node { + packages + .entry(package_id.name()) + .or_insert_with(Vec::new) + .push((node, i)); + } + } + + let mut dupes: Vec<(&Node, usize)> = packages + .into_iter() + .filter(|(_name, indexes)| indexes.len() > 1) + .flat_map(|(_name, indexes)| indexes) + .collect(); + // For consistent output. + dupes.sort_unstable(); + dupes.into_iter().map(|(_node, i)| i).collect() + } +} + +/// Builds the graph. +pub fn build<'a>( + ws: &Workspace<'_>, + resolve: &Resolve, + resolved_features: &ResolvedFeatures, + specs: &[PackageIdSpec], + requested_features: &RequestedFeatures, + target_data: &RustcTargetData, + requested_kinds: &[CompileKind], + package_map: HashMap, + opts: &TreeOptions, +) -> CargoResult> { + let mut graph = Graph::new(package_map); + let mut members_with_features = ws.members_with_features(specs, requested_features)?; + members_with_features.sort_unstable_by_key(|e| e.0.package_id()); + for (member, requested_features) in members_with_features { + let member_id = member.package_id(); + let features_for = FeaturesFor::from_for_host(member.proc_macro()); + for kind in requested_kinds { + let member_index = add_pkg( + &mut graph, + resolve, + resolved_features, + member_id, + features_for, + target_data, + *kind, + opts, + ); + if opts.graph_features { + let fmap = resolve.summary(member_id).features(); + add_cli_features(&mut graph, member_index, &requested_features, fmap); + } + } + } + if opts.graph_features { + add_internal_features(&mut graph, resolve); + } + Ok(graph) +} + +/// Adds a single package node (if it does not already exist). +/// +/// This will also recursively add all of its dependencies. +/// +/// Returns the index to the package node. 
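`add_pkg`, whose body follows, is the heart of the graph builder: a memoized depth-first insertion, so a package shared by many dependents gets exactly one node and is recursed into only once. Stripped of features, kinds, and dependency filtering, the shape is roughly the following self-contained sketch (the types are simplified stand-ins, not cargo's):

    use std::collections::HashMap;

    #[derive(Clone, PartialEq, Eq, Hash)]
    struct Pkg(&'static str);

    struct Graph {
        nodes: Vec<Pkg>,
        edges: Vec<Vec<usize>>,
        index: HashMap<Pkg, usize>,
    }

    /// Insert `pkg` (and, recursively, its dependencies), returning its node index.
    /// An already-present package is reused rather than duplicated.
    fn add_pkg(graph: &mut Graph, deps_of: &HashMap<&'static str, Vec<Pkg>>, pkg: Pkg) -> usize {
        if let Some(&idx) = graph.index.get(&pkg) {
            return idx;
        }
        let from = graph.nodes.len();
        graph.nodes.push(pkg.clone());
        graph.edges.push(Vec::new());
        graph.index.insert(pkg, from);
        let deps = deps_of.get(graph.nodes[from].0).cloned().unwrap_or_default();
        for dep in deps {
            let to = add_pkg(graph, deps_of, dep);
            graph.edges[from].push(to);
        }
        from
    }

    fn main() {
        let mut deps_of = HashMap::new();
        deps_of.insert("a", vec![Pkg("b"), Pkg("c")]);
        deps_of.insert("b", vec![Pkg("c")]);
        let mut graph = Graph { nodes: Vec::new(), edges: Vec::new(), index: HashMap::new() };
        let a = add_pkg(&mut graph, &deps_of, Pkg("a"));
        assert_eq!(a, 0);
        assert_eq!(graph.nodes.len(), 3); // "c" is shared but inserted only once
    }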
+fn add_pkg( + graph: &mut Graph<'_>, + resolve: &Resolve, + resolved_features: &ResolvedFeatures, + package_id: PackageId, + features_for: FeaturesFor, + target_data: &RustcTargetData, + requested_kind: CompileKind, + opts: &TreeOptions, +) -> usize { + let node_features = resolved_features.activated_features(package_id, features_for); + let node_kind = match features_for { + FeaturesFor::HostDep => CompileKind::Host, + FeaturesFor::NormalOrDev => requested_kind, + }; + let node = Node::Package { + package_id, + features: node_features.clone(), + kind: node_kind, + }; + if let Some(idx) = graph.index.get(&node) { + return *idx; + } + let from_index = graph.add_node(node); + // Compute the dep name map which is later used for foo/bar feature lookups. + let mut dep_name_map: HashMap> = HashMap::new(); + let mut deps: Vec<_> = resolve.deps(package_id).collect(); + deps.sort_unstable_by_key(|(dep_id, _)| *dep_id); + let show_all_targets = opts.target == super::Target::All; + for (dep_id, deps) in deps { + let mut deps: Vec<_> = deps + .iter() + // This filter is *similar* to the one found in `unit_dependencies::compute_deps`. + // Try to keep them in sync! + .filter(|dep| { + let kind = match (node_kind, dep.kind()) { + (CompileKind::Host, _) => CompileKind::Host, + (_, DepKind::Build) => CompileKind::Host, + (_, DepKind::Normal) => node_kind, + (_, DepKind::Development) => node_kind, + }; + // Filter out inactivated targets. + if !show_all_targets && !target_data.dep_platform_activated(dep, kind) { + return false; + } + // Filter out dev-dependencies if requested. + if !opts.edge_kinds.contains(&EdgeKind::Dep(dep.kind())) { + return false; + } + if dep.is_optional() { + // If the new feature resolver does not enable this + // optional dep, then don't use it. + if !node_features.contains(&dep.name_in_toml()) { + return false; + } + } + true + }) + .collect(); + deps.sort_unstable_by_key(|dep| dep.name_in_toml()); + let dep_pkg = graph.package_map[&dep_id]; + + for dep in deps { + let dep_features_for = if dep.is_build() || dep_pkg.proc_macro() { + FeaturesFor::HostDep + } else { + features_for + }; + let dep_index = add_pkg( + graph, + resolve, + resolved_features, + dep_id, + dep_features_for, + target_data, + requested_kind, + opts, + ); + if opts.graph_features { + // Add the dependency node with feature nodes in-between. + dep_name_map + .entry(dep.name_in_toml()) + .or_default() + .insert((dep_index, dep.is_optional())); + if dep.uses_default_features() { + add_feature( + graph, + InternedString::new("default"), + Some(from_index), + dep_index, + EdgeKind::Dep(dep.kind()), + ); + } + for feature in dep.features() { + add_feature( + graph, + *feature, + Some(from_index), + dep_index, + EdgeKind::Dep(dep.kind()), + ); + } + if !dep.uses_default_features() && dep.features().is_empty() { + // No features, use a direct connection. + graph.edges[from_index].add_edge(EdgeKind::Dep(dep.kind()), dep_index); + } + } else { + graph.edges[from_index].add_edge(EdgeKind::Dep(dep.kind()), dep_index); + } + } + } + if opts.graph_features { + assert!(graph + .dep_name_map + .insert(from_index, dep_name_map) + .is_none()); + } + + from_index +} + +/// Adds a feature node between two nodes. +/// +/// That is, it adds the following: +/// +/// ```text +/// from -Edge-> featname -Edge::Feature-> to +/// ``` +fn add_feature( + graph: &mut Graph<'_>, + name: InternedString, + from: Option, + to: usize, + kind: EdgeKind, +) -> usize { + // `to` *must* point to a package node. + assert!(matches! 
{graph.nodes[to], Node::Package{..}}); + let node = Node::Feature { + node_index: to, + name, + }; + let node_index = match graph.index.get(&node) { + Some(idx) => *idx, + None => graph.add_node(node), + }; + if let Some(from) = from { + graph.edges[from].add_edge(kind, node_index); + } + graph.edges[node_index].add_edge(EdgeKind::Feature, to); + node_index +} + +/// Adds nodes for features requested on the command-line for the given member. +/// +/// Feature nodes are added as "roots" (i.e., they have no "from" index), +/// because they come from the outside world. They usually only appear with +/// `--invert`. +fn add_cli_features( + graph: &mut Graph<'_>, + package_index: usize, + requested_features: &RequestedFeatures, + feature_map: &FeatureMap, +) { + // NOTE: Recursive enabling of features will be handled by + // add_internal_features. + + // Create a list of feature names requested on the command-line. + let mut to_add: Vec = Vec::new(); + if requested_features.all_features { + to_add.extend(feature_map.keys().copied()); + // Add optional deps. + for (dep_name, deps) in &graph.dep_name_map[&package_index] { + if deps.iter().any(|(_idx, is_optional)| *is_optional) { + to_add.push(*dep_name); + } + } + } else { + if requested_features.uses_default_features { + to_add.push(InternedString::new("default")); + } + to_add.extend(requested_features.features.iter().copied()); + }; + + // Add each feature as a node, and mark as "from command-line" in graph.cli_features. + for name in to_add { + if name.contains('/') { + let mut parts = name.splitn(2, '/'); + let dep_name = InternedString::new(parts.next().unwrap()); + let feat_name = InternedString::new(parts.next().unwrap()); + for (dep_index, is_optional) in graph.dep_name_map[&package_index][&dep_name].clone() { + if is_optional { + // Activate the optional dep on self. + let index = + add_feature(graph, dep_name, None, package_index, EdgeKind::Feature); + graph.cli_features.insert(index); + } + let index = add_feature(graph, feat_name, None, dep_index, EdgeKind::Feature); + graph.cli_features.insert(index); + } + } else { + let index = add_feature(graph, name, None, package_index, EdgeKind::Feature); + graph.cli_features.insert(index); + } + } +} + +/// Recursively adds connections between features in the `[features]` table +/// for every package. +fn add_internal_features(graph: &mut Graph<'_>, resolve: &Resolve) { + // Collect features already activated by dependencies or command-line. + let feature_nodes: Vec<(PackageId, usize, usize, InternedString)> = graph + .nodes + .iter() + .enumerate() + .filter_map(|(i, node)| match node { + Node::Package { .. } => None, + Node::Feature { node_index, name } => { + let package_id = graph.package_id_for_index(*node_index); + Some((package_id, *node_index, i, *name)) + } + }) + .collect(); + + for (package_id, package_index, feature_index, feature_name) in feature_nodes { + add_feature_rec( + graph, + resolve, + feature_name, + package_id, + feature_index, + package_index, + ); + } +} + +/// Recursively add feature nodes for all features enabled by the given feature. +/// +/// `from` is the index of the node that enables this feature. +/// `package_index` is the index of the package node for the feature. 
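`add_feature_rec`, defined next, walks the `[features]` table transitively: enabling a feature enables everything it lists, including `dep/feat` entries that reach into dependencies. The same idea over a plain feature map, as a standalone sketch that leaves out the `dep/feat` cases:

    use std::collections::{HashMap, HashSet};

    /// Recursively enable `name` and everything it implies, mirroring the
    /// transitive walk of the `[features]` table.
    fn enable(features: &HashMap<&str, Vec<&str>>, name: &str, enabled: &mut HashSet<String>) {
        if !enabled.insert(name.to_string()) {
            return; // already enabled: nothing new to do
        }
        for implied in features.get(name).into_iter().flatten() {
            enable(features, implied, enabled);
        }
    }

    fn main() {
        let mut features = HashMap::new();
        features.insert("default", vec!["std", "serde"]);
        features.insert("serde", vec!["std"]);
        let mut enabled = HashSet::new();
        enable(&features, "default", &mut enabled);
        assert_eq!(enabled.len(), 3); // default, serde, std
    }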
+fn add_feature_rec( + graph: &mut Graph<'_>, + resolve: &Resolve, + feature_name: InternedString, + package_id: PackageId, + from: usize, + package_index: usize, +) { + let feature_map = resolve.summary(package_id).features(); + let fvs = match feature_map.get(&feature_name) { + Some(fvs) => fvs, + None => return, + }; + for fv in fvs { + match fv { + FeatureValue::Feature(fv_name) | FeatureValue::Crate(fv_name) => { + let feat_index = add_feature( + graph, + *fv_name, + Some(from), + package_index, + EdgeKind::Feature, + ); + add_feature_rec( + graph, + resolve, + *fv_name, + package_id, + feat_index, + package_index, + ); + } + FeatureValue::CrateFeature(dep_name, fv_name) => { + let dep_indexes = match graph.dep_name_map[&package_index].get(dep_name) { + Some(indexes) => indexes.clone(), + None => { + log::debug!( + "enabling feature {} on {}, found {}/{}, \ + dep appears to not be enabled", + feature_name, + package_id, + dep_name, + fv_name + ); + continue; + } + }; + for (dep_index, is_optional) in dep_indexes { + let dep_pkg_id = graph.package_id_for_index(dep_index); + if is_optional { + // Activate the optional dep on self. + add_feature( + graph, + *dep_name, + Some(from), + package_index, + EdgeKind::Feature, + ); + } + let feat_index = + add_feature(graph, *fv_name, Some(from), dep_index, EdgeKind::Feature); + add_feature_rec(graph, resolve, *fv_name, dep_pkg_id, feat_index, dep_index); + } + } + } + } +} diff -Nru cargo-0.44.1/src/cargo/ops/tree/mod.rs cargo-0.47.0/src/cargo/ops/tree/mod.rs --- cargo-0.44.1/src/cargo/ops/tree/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/tree/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,391 @@ +//! Implementation of `cargo tree`. + +use self::format::Pattern; +use crate::core::compiler::{CompileKind, RustcTargetData}; +use crate::core::dependency::DepKind; +use crate::core::resolver::{ForceAllTargets, HasDevUnits, ResolveOpts}; +use crate::core::{Package, PackageId, PackageIdSpec, Workspace}; +use crate::ops::{self, Packages}; +use crate::util::{CargoResult, Config}; +use crate::{drop_print, drop_println}; +use anyhow::{bail, Context}; +use graph::Graph; +use std::collections::{HashMap, HashSet}; +use std::str::FromStr; + +mod format; +mod graph; + +pub use {graph::EdgeKind, graph::Node}; + +pub struct TreeOptions { + pub features: Vec, + pub no_default_features: bool, + pub all_features: bool, + /// The packages to display the tree for. + pub packages: Packages, + /// The platform to filter for. + pub target: Target, + /// The dependency kinds to display. + pub edge_kinds: HashSet, + pub invert: Vec, + /// The style of prefix for each line. + pub prefix: Prefix, + /// If `true`, duplicates will be repeated. + /// If `false`, duplicates will be marked with `*`, and their dependencies + /// won't be shown. + pub no_dedupe: bool, + /// If `true`, run in a special mode where it will scan for packages that + /// appear with different versions, and report if any where found. Implies + /// `invert`. + pub duplicates: bool, + /// The style of characters to use. + pub charset: Charset, + /// A format string indicating how each package should be displayed. + pub format: String, + /// Includes features in the tree as separate nodes. 
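The recursion in `add_feature_rec` above walks the `[features]` table, turning each entry into further feature nodes. The sketch below models just the feature-to-feature case over std collections; cargo's `FeatureValue` also covers `dep` and `dep/feat` entries, which are omitted here:

```rust
use std::collections::{HashMap, HashSet};

/// Recursively collect every feature enabled by `name`, following a
/// simplified `[features]` table where each entry only names other features.
fn enable_feature(
    features: &HashMap<&str, Vec<&str>>,
    name: &str,
    enabled: &mut HashSet<String>,
) {
    // `insert` returns false if the feature is already enabled, which also
    // stops cycles in the feature table from recursing forever.
    if !enabled.insert(name.to_string()) {
        return;
    }
    if let Some(children) = features.get(name) {
        for child in children {
            enable_feature(features, child, enabled);
        }
    }
}

fn main() {
    let mut features = HashMap::new();
    features.insert("default", vec!["std", "alloc"]);
    features.insert("std", vec!["alloc"]);
    features.insert("alloc", vec![]);

    let mut enabled = HashSet::new();
    enable_feature(&features, "default", &mut enabled);
    assert!(enabled.contains("std") && enabled.contains("alloc"));
    println!("enabled: {:?}", enabled);
}
```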
+ pub graph_features: bool, +} + +#[derive(PartialEq)] +pub enum Target { + Host, + Specific(Vec), + All, +} + +impl Target { + pub fn from_cli(targets: Vec) -> Target { + match targets.len() { + 0 => Target::Host, + 1 if targets[0] == "all" => Target::All, + _ => Target::Specific(targets), + } + } +} + +pub enum Charset { + Utf8, + Ascii, +} + +impl FromStr for Charset { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "utf8" => Ok(Charset::Utf8), + "ascii" => Ok(Charset::Ascii), + _ => Err("invalid charset"), + } + } +} + +#[derive(Clone, Copy)] +pub enum Prefix { + None, + Indent, + Depth, +} + +impl FromStr for Prefix { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "none" => Ok(Prefix::None), + "indent" => Ok(Prefix::Indent), + "depth" => Ok(Prefix::Depth), + _ => Err("invalid prefix"), + } + } +} + +struct Symbols { + down: &'static str, + tee: &'static str, + ell: &'static str, + right: &'static str, +} + +static UTF8_SYMBOLS: Symbols = Symbols { + down: "│", + tee: "├", + ell: "└", + right: "─", +}; + +static ASCII_SYMBOLS: Symbols = Symbols { + down: "|", + tee: "|", + ell: "`", + right: "-", +}; + +/// Entry point for the `cargo tree` command. +pub fn build_and_print(ws: &Workspace<'_>, opts: &TreeOptions) -> CargoResult<()> { + if opts.graph_features && opts.duplicates { + bail!("the `-e features` flag does not support `--duplicates`"); + } + let requested_targets = match &opts.target { + Target::All | Target::Host => Vec::new(), + Target::Specific(t) => t.clone(), + }; + // TODO: Target::All is broken with -Zfeatures=itarget. To handle that properly, + // `FeatureResolver` will need to be taught what "all" means. + let requested_kinds = CompileKind::from_requested_targets(ws.config(), &requested_targets)?; + let target_data = RustcTargetData::new(ws, &requested_kinds)?; + let specs = opts.packages.to_package_id_specs(ws)?; + let resolve_opts = ResolveOpts::new( + /*dev_deps*/ true, + &opts.features, + opts.all_features, + !opts.no_default_features, + ); + let has_dev = if opts + .edge_kinds + .contains(&EdgeKind::Dep(DepKind::Development)) + { + HasDevUnits::Yes + } else { + HasDevUnits::No + }; + let force_all = if opts.target == Target::All { + ForceAllTargets::Yes + } else { + ForceAllTargets::No + }; + let ws_resolve = ops::resolve_ws_with_opts( + ws, + &target_data, + &requested_kinds, + &resolve_opts, + &specs, + has_dev, + force_all, + )?; + // Download all Packages. Some display formats need to display package metadata. + let package_map: HashMap = ws_resolve + .pkg_set + .get_many(ws_resolve.pkg_set.package_ids())? + .into_iter() + .map(|pkg| (pkg.package_id(), pkg)) + .collect(); + + let mut graph = graph::build( + ws, + &ws_resolve.targeted_resolve, + &ws_resolve.resolved_features, + &specs, + &resolve_opts.features, + &target_data, + &requested_kinds, + package_map, + opts, + )?; + + let root_specs = if opts.invert.is_empty() { + specs + } else { + opts.invert + .iter() + .map(|p| PackageIdSpec::parse(p)) + .collect::>>()? 
+ }; + let root_ids = ws_resolve.targeted_resolve.specs_to_ids(&root_specs)?; + let root_indexes = graph.indexes_from_ids(&root_ids); + + let root_indexes = if opts.duplicates { + // `-d -p foo` will only show duplicates within foo's subtree + graph = graph.from_reachable(root_indexes.as_slice()); + graph.find_duplicates() + } else { + root_indexes + }; + + if !opts.invert.is_empty() || opts.duplicates { + graph.invert(); + } + + print(ws.config(), opts, root_indexes, &graph)?; + Ok(()) +} + +/// Prints a tree for each given root. +fn print( + config: &Config, + opts: &TreeOptions, + roots: Vec, + graph: &Graph<'_>, +) -> CargoResult<()> { + let format = Pattern::new(&opts.format) + .with_context(|| format!("tree format `{}` not valid", opts.format))?; + + let symbols = match opts.charset { + Charset::Utf8 => &UTF8_SYMBOLS, + Charset::Ascii => &ASCII_SYMBOLS, + }; + + // The visited deps is used to display a (*) whenever a dep has + // already been printed (ignored with --no-dedupe). + let mut visited_deps = HashSet::new(); + + for (i, root_index) in roots.into_iter().enumerate() { + if i != 0 { + drop_println!(config); + } + + // A stack of bools used to determine where | symbols should appear + // when printing a line. + let mut levels_continue = vec![]; + // The print stack is used to detect dependency cycles when + // --no-dedupe is used. It contains a Node for each level. + let mut print_stack = vec![]; + + print_node( + config, + graph, + root_index, + &format, + symbols, + opts.prefix, + opts.no_dedupe, + &mut visited_deps, + &mut levels_continue, + &mut print_stack, + ); + } + + Ok(()) +} + +/// Prints a package and all of its dependencies. +fn print_node<'a>( + config: &Config, + graph: &'a Graph<'_>, + node_index: usize, + format: &Pattern, + symbols: &Symbols, + prefix: Prefix, + no_dedupe: bool, + visited_deps: &mut HashSet, + levels_continue: &mut Vec, + print_stack: &mut Vec, +) { + let new = no_dedupe || visited_deps.insert(node_index); + + match prefix { + Prefix::Depth => drop_print!(config, "{}", levels_continue.len()), + Prefix::Indent => { + if let Some((last_continues, rest)) = levels_continue.split_last() { + for continues in rest { + let c = if *continues { symbols.down } else { " " }; + drop_print!(config, "{} ", c); + } + + let c = if *last_continues { + symbols.tee + } else { + symbols.ell + }; + drop_print!(config, "{0}{1}{1} ", c, symbols.right); + } + } + Prefix::None => {} + } + + let in_cycle = print_stack.contains(&node_index); + // If this node does not have any outgoing edges, don't include the (*) + // since there isn't really anything "deduplicated", and it generally just + // adds noise. + let has_deps = graph.has_outgoing_edges(node_index); + let star = if (new && !in_cycle) || !has_deps { + "" + } else { + " (*)" + }; + drop_println!(config, "{}{}", format.display(graph, node_index), star); + + if !new || in_cycle { + return; + } + print_stack.push(node_index); + + for kind in &[ + EdgeKind::Dep(DepKind::Normal), + EdgeKind::Dep(DepKind::Build), + EdgeKind::Dep(DepKind::Development), + EdgeKind::Feature, + ] { + print_dependencies( + config, + graph, + node_index, + format, + symbols, + prefix, + no_dedupe, + visited_deps, + levels_continue, + print_stack, + kind, + ); + } + print_stack.pop(); +} + +/// Prints all the dependencies of a package for the given dependency kind. 
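The `(*)` de-duplication marker and the `│ ├── └──` prefixes produced by `print_node` above are driven by a `levels_continue` stack of booleans plus a visited set. A self-contained sketch of that rendering loop over a toy dependency map (the symbols and spacing are approximate):

```rust
use std::collections::{HashMap, HashSet};

/// Each level pushes a bool saying whether more siblings follow, which picks
/// `├──` vs `└──` and whether a `│` continues below. Nodes already printed
/// get a `(*)` marker and their children are skipped.
fn print_node(
    graph: &HashMap<&str, Vec<&str>>,
    node: &str,
    levels_continue: &mut Vec<bool>,
    visited: &mut HashSet<String>,
) {
    if let Some((last, rest)) = levels_continue.split_last() {
        for continues in rest {
            print!("{}   ", if *continues { "│" } else { " " });
        }
        print!("{}── ", if *last { "├" } else { "└" });
    }
    let new = visited.insert(node.to_string());
    let star = if new { "" } else { " (*)" };
    println!("{}{}", node, star);
    if !new {
        return;
    }
    let deps = graph.get(node).map(|v| v.as_slice()).unwrap_or(&[]);
    let mut it = deps.iter().peekable();
    while let Some(dep) = it.next() {
        levels_continue.push(it.peek().is_some());
        print_node(graph, dep, levels_continue, visited);
        levels_continue.pop();
    }
}

fn main() {
    let mut graph = HashMap::new();
    graph.insert("app", vec!["serde", "log"]);
    graph.insert("serde", vec!["serde_derive"]);
    graph.insert("log", vec!["serde"]); // shared dep triggers the (*) marker
    print_node(&graph, "app", &mut Vec::new(), &mut HashSet::new());
}
```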
+fn print_dependencies<'a>( + config: &Config, + graph: &'a Graph<'_>, + node_index: usize, + format: &Pattern, + symbols: &Symbols, + prefix: Prefix, + no_dedupe: bool, + visited_deps: &mut HashSet, + levels_continue: &mut Vec, + print_stack: &mut Vec, + kind: &EdgeKind, +) { + let deps = graph.connected_nodes(node_index, kind); + if deps.is_empty() { + return; + } + + let name = match kind { + EdgeKind::Dep(DepKind::Normal) => None, + EdgeKind::Dep(DepKind::Build) => Some("[build-dependencies]"), + EdgeKind::Dep(DepKind::Development) => Some("[dev-dependencies]"), + EdgeKind::Feature => None, + }; + + if let Prefix::Indent = prefix { + if let Some(name) = name { + for continues in &**levels_continue { + let c = if *continues { symbols.down } else { " " }; + drop_print!(config, "{} ", c); + } + + drop_println!(config, "{}", name); + } + } + + let mut it = deps.iter().peekable(); + while let Some(dependency) = it.next() { + levels_continue.push(it.peek().is_some()); + print_node( + config, + graph, + *dependency, + format, + symbols, + prefix, + no_dedupe, + visited_deps, + levels_continue, + print_stack, + ); + levels_continue.pop(); + } +} diff -Nru cargo-0.44.1/src/cargo/ops/vendor.rs cargo-0.47.0/src/cargo/ops/vendor.rs --- cargo-0.44.1/src/cargo/ops/vendor.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/ops/vendor.rs 2020-07-17 20:39:39.000000000 +0000 @@ -8,8 +8,7 @@ use serde::Serialize; use std::collections::HashSet; use std::collections::{BTreeMap, BTreeSet, HashMap}; -use std::fs::{self, File}; -use std::io::Write; +use std::fs; use std::path::{Path, PathBuf}; pub struct VendorOptions<'a> { @@ -20,20 +19,23 @@ } pub fn vendor(ws: &Workspace<'_>, opts: &VendorOptions<'_>) -> CargoResult<()> { + let config = ws.config(); let mut extra_workspaces = Vec::new(); for extra in opts.extra.iter() { - let extra = ws.config().cwd().join(extra); - let ws = Workspace::new(&extra, ws.config())?; + let extra = config.cwd().join(extra); + let ws = Workspace::new(&extra, config)?; extra_workspaces.push(ws); } let workspaces = extra_workspaces.iter().chain(Some(ws)).collect::>(); let vendor_config = - sync(ws.config(), &workspaces, opts).chain_err(|| "failed to sync".to_string())?; + sync(config, &workspaces, opts).chain_err(|| "failed to sync".to_string())?; - let shell = ws.config().shell(); - if shell.verbosity() != Verbosity::Quiet { - eprint!("To use vendored sources, add this to your .cargo/config for this project:\n\n"); - print!("{}", &toml::to_string(&vendor_config).unwrap()); + if config.shell().verbosity() != Verbosity::Quiet { + crate::drop_eprint!( + config, + "To use vendored sources, add this to your .cargo/config for this project:\n\n" + ); + crate::drop_print!(config, "{}", &toml::to_string(&vendor_config).unwrap()); } Ok(()) @@ -223,7 +225,7 @@ "files": map, }); - File::create(&cksum)?.write_all(json.to_string().as_bytes())?; + paths::write(&cksum, json.to_string())?; } for path in to_remove { @@ -331,8 +333,7 @@ paths::create_dir_all(dst.parent().unwrap())?; - fs::copy(&p, &dst) - .chain_err(|| format!("failed to copy `{}` to `{}`", p.display(), dst.display()))?; + paths::copy(&p, &dst)?; let cksum = Sha256::new().update_path(dst)?.finish_hex(); cksums.insert(relative.to_str().unwrap().replace("\\", "/"), cksum); } diff -Nru cargo-0.44.1/src/cargo/sources/git/mod.rs cargo-0.47.0/src/cargo/sources/git/mod.rs --- cargo-0.44.1/src/cargo/sources/git/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/git/mod.rs 2020-07-17 
20:39:39.000000000 +0000 @@ -1,4 +1,4 @@ pub use self::source::GitSource; -pub use self::utils::{fetch, GitCheckout, GitDatabase, GitRemote, GitRevision}; +pub use self::utils::{fetch, GitCheckout, GitDatabase, GitRemote}; mod source; mod utils; diff -Nru cargo-0.44.1/src/cargo/sources/git/source.rs cargo-0.47.0/src/cargo/sources/git/source.rs --- cargo-0.44.1/src/cargo/sources/git/source.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/git/source.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,23 +1,22 @@ -use std::fmt::{self, Debug, Formatter}; - -use log::trace; -use url::Url; - use crate::core::source::{MaybePackage, Source, SourceId}; use crate::core::GitReference; use crate::core::{Dependency, Package, PackageId, Summary}; -use crate::sources::git::utils::{GitRemote, GitRevision}; +use crate::sources::git::utils::GitRemote; use crate::sources::PathSource; use crate::util::errors::CargoResult; use crate::util::hex::short_hash; use crate::util::Config; +use anyhow::Context; +use log::trace; +use std::fmt::{self, Debug, Formatter}; +use url::Url; pub struct GitSource<'cfg> { remote: GitRemote, - reference: GitReference, + manifest_reference: GitReference, + locked_rev: Option, source_id: SourceId, path_source: Option>, - rev: Option, ident: String, config: &'cfg Config, } @@ -29,17 +28,17 @@ let remote = GitRemote::new(source_id.url()); let ident = ident(&source_id); - let reference = match source_id.precise() { - Some(s) => GitReference::Rev(s.to_string()), - None => source_id.git_reference().unwrap().clone(), - }; - let source = GitSource { remote, - reference, + manifest_reference: source_id.git_reference().unwrap().clone(), + locked_rev: match source_id.precise() { + Some(s) => Some(git2::Oid::from_str(s).with_context(|| { + format!("precise value for git is not a git revision: {}", s) + })?), + None => None, + }, source_id, path_source: None, - rev: None, ident, config, }; @@ -76,7 +75,7 @@ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "git repo at {}", self.remote.url())?; - match self.reference.pretty_ref() { + match self.manifest_reference.pretty_ref() { Some(s) => write!(f, " ({})", s), None => Ok(()), } @@ -117,52 +116,70 @@ let git_path = self.config.assert_package_cache_locked(&git_path); let db_path = git_path.join("db").join(&self.ident); - if self.config.offline() && !db_path.exists() { - anyhow::bail!( - "can't checkout from '{}': you are in the offline mode (--offline)", - self.remote.url() - ); - } - - // Resolve our reference to an actual revision, and check if the - // database already has that revision. If it does, we just load a - // database pinned at that revision, and if we don't we issue an update - // to try to find the revision. - let actual_rev = self.remote.rev_for(&db_path, &self.reference); - let should_update = actual_rev.is_err() || self.source_id.precise().is_none(); - - let (db, actual_rev) = if should_update && !self.config.offline() { - self.config.shell().status( - "Updating", - format!("git repository `{}`", self.remote.url()), - )?; - - trace!("updating git source `{:?}`", self.remote); - - self.remote - .checkout(&db_path, &self.reference, self.config)? - } else { - (self.remote.db_at(&db_path)?, actual_rev.unwrap()) + let db = self.remote.db_at(&db_path).ok(); + let (db, actual_rev) = match (self.locked_rev, db) { + // If we have a locked revision, and we have a preexisting database + // which has that revision, then no update needs to happen. 
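The `locked_rev` field introduced above is populated from the `precise` part of the source id, which must be a full git object id. A small sketch of that conversion, assuming the `git2` and `anyhow` crates as cargo itself uses (the helper name is illustrative):

```rust
use anyhow::{Context, Result};

/// Turn an optional `precise` string into a locked git object id, rejecting
/// anything that is not a valid revision hash.
fn parse_locked_rev(precise: Option<&str>) -> Result<Option<git2::Oid>> {
    match precise {
        Some(s) => {
            let oid = git2::Oid::from_str(s)
                .with_context(|| format!("precise value for git is not a git revision: {}", s))?;
            Ok(Some(oid))
        }
        None => Ok(None),
    }
}

fn main() -> Result<()> {
    // A well-formed hex object id parses...
    assert!(parse_locked_rev(Some("0123456789abcdef0123456789abcdef01234567"))?.is_some());
    // ...while branch names and other non-hex strings are rejected.
    assert!(parse_locked_rev(Some("not-a-rev")).is_err());
    Ok(())
}
```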
+ (Some(rev), Some(db)) if db.contains(rev) => (db, rev), + + // If we're in offline mode, we're not locked, and we have a + // database, then try to resolve our reference with the preexisting + // repository. + (None, Some(db)) if self.config.offline() => { + let rev = db.resolve(&self.manifest_reference).with_context(|| { + "failed to lookup reference in preexisting repository, and \ + can't check for updates in offline mode (--offline)" + })?; + (db, rev) + } + + // ... otherwise we use this state to update the git database. Note + // that we still check for being offline here, for example in the + // situation that we have a locked revision but the database + // doesn't have it. + (locked_rev, db) => { + if self.config.offline() { + anyhow::bail!( + "can't checkout from '{}': you are in the offline mode (--offline)", + self.remote.url() + ); + } + self.config.shell().status( + "Updating", + format!("git repository `{}`", self.remote.url()), + )?; + + trace!("updating git source `{:?}`", self.remote); + + self.remote.checkout( + &db_path, + db, + &self.manifest_reference, + locked_rev, + self.config, + )? + } }; // Don’t use the full hash, in order to contribute less to reaching the // path length limit on Windows. See // . - let short_id = db.to_short_id(&actual_rev).unwrap(); + let short_id = db.to_short_id(actual_rev)?; + // Check out `actual_rev` from the database to a scoped location on the + // filesystem. This will use hard links and such to ideally make the + // checkout operation here pretty fast. let checkout_path = git_path .join("checkouts") .join(&self.ident) .join(short_id.as_str()); - - // Copy the database to the checkout location. db.copy_to(actual_rev.clone(), &checkout_path, self.config)?; let source_id = self.source_id.with_precise(Some(actual_rev.to_string())); let path_source = PathSource::new_recursive(&checkout_path, source_id, self.config); self.path_source = Some(path_source); - self.rev = Some(actual_rev); + self.locked_rev = Some(actual_rev); self.path_source.as_mut().unwrap().update() } @@ -183,7 +200,7 @@ } fn fingerprint(&self, _pkg: &Package) -> CargoResult { - Ok(self.rev.as_ref().unwrap().to_string()) + Ok(self.locked_rev.as_ref().unwrap().to_string()) } fn describe(&self) -> String { diff -Nru cargo-0.44.1/src/cargo/sources/git/utils.rs cargo-0.47.0/src/cargo/sources/git/utils.rs --- cargo-0.44.1/src/cargo/sources/git/utils.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/git/utils.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,27 +3,18 @@ use crate::util::paths; use crate::util::process_builder::process; use crate::util::{network, Config, IntoUrl, Progress}; -use curl::easy::{Easy, List}; -use git2::{self, ObjectType}; +use anyhow::{anyhow, Context}; +use curl::easy::List; +use git2::{self, ErrorClass, ObjectType}; use log::{debug, info}; use serde::ser; use serde::Serialize; use std::env; use std::fmt; -use std::fs::File; use std::path::{Path, PathBuf}; use std::process::Command; use url::Url; -#[derive(PartialEq, Clone, Debug)] -pub struct GitRevision(git2::Oid); - -impl ser::Serialize for GitRevision { - fn serialize(&self, s: S) -> Result { - serialize_str(self, s) - } -} - fn serialize_str(t: &T, s: S) -> Result where T: fmt::Display, @@ -32,12 +23,6 @@ s.collect_str(t) } -impl fmt::Display for GitRevision { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } -} - pub struct GitShortID(git2::Buf); impl GitShortID { @@ -71,7 +56,8 @@ pub struct GitCheckout<'a> { database: &'a 
GitDatabase, location: PathBuf, - revision: GitRevision, + #[serde(serialize_with = "serialize_str")] + revision: git2::Oid, #[serde(skip_serializing)] repo: git2::Repository, } @@ -87,33 +73,52 @@ &self.url } - pub fn rev_for(&self, path: &Path, reference: &GitReference) -> CargoResult { + pub fn rev_for(&self, path: &Path, reference: &GitReference) -> CargoResult { reference.resolve(&self.db_at(path)?.repo) } pub fn checkout( &self, into: &Path, + db: Option, reference: &GitReference, + locked_rev: Option, cargo_config: &Config, - ) -> CargoResult<(GitDatabase, GitRevision)> { - let mut repo_and_rev = None; - if let Ok(mut repo) = git2::Repository::open(into) { - self.fetch_into(&mut repo, cargo_config) - .chain_err(|| format!("failed to fetch into {}", into.display()))?; - if let Ok(rev) = reference.resolve(&repo) { - repo_and_rev = Some((repo, rev)); - } - } - let (repo, rev) = match repo_and_rev { - Some(pair) => pair, - None => { - let repo = self - .clone_into(into, cargo_config) - .chain_err(|| format!("failed to clone into: {}", into.display()))?; - let rev = reference.resolve(&repo)?; - (repo, rev) + ) -> CargoResult<(GitDatabase, git2::Oid)> { + // If we have a previous instance of `GitDatabase` then fetch into that + // if we can. If that can successfully load our revision then we've + // populated the database with the latest version of `reference`, so + // return that database and the rev we resolve to. + if let Some(mut db) = db { + fetch(&mut db.repo, self.url.as_str(), reference, cargo_config) + .context(format!("failed to fetch into: {}", into.display()))?; + match locked_rev { + Some(rev) => { + if db.contains(rev) { + return Ok((db, rev)); + } + } + None => { + if let Ok(rev) = reference.resolve(&db.repo) { + return Ok((db, rev)); + } + } } + } + + // Otherwise start from scratch to handle corrupt git repositories. + // After our fetch (which is interpreted as a clone now) we do the same + // resolution to figure out what we cloned. + if into.exists() { + paths::remove_dir_all(into)?; + } + paths::create_dir_all(into)?; + let mut repo = init(into, true)?; + fetch(&mut repo, self.url.as_str(), reference, cargo_config) + .context(format!("failed to clone into: {}", into.display()))?; + let rev = match locked_rev { + Some(rev) => rev, + None => reference.resolve(&repo)?, }; Ok(( @@ -134,33 +139,12 @@ repo, }) } - - fn fetch_into(&self, dst: &mut git2::Repository, cargo_config: &Config) -> CargoResult<()> { - // Create a local anonymous remote in the repository to fetch the url - let refspec = "refs/heads/*:refs/heads/*"; - fetch(dst, self.url.as_str(), refspec, cargo_config) - } - - fn clone_into(&self, dst: &Path, cargo_config: &Config) -> CargoResult { - if dst.exists() { - paths::remove_dir_all(dst)?; - } - paths::create_dir_all(dst)?; - let mut repo = init(dst, true)?; - fetch( - &mut repo, - self.url.as_str(), - "refs/heads/*:refs/heads/*", - cargo_config, - )?; - Ok(repo) - } } impl GitDatabase { pub fn copy_to( &self, - rev: GitRevision, + rev: git2::Oid, dest: &Path, cargo_config: &Config, ) -> CargoResult> { @@ -168,15 +152,19 @@ if let Ok(repo) = git2::Repository::open(dest) { let mut co = GitCheckout::new(dest, self, rev.clone(), repo); if !co.is_fresh() { - // After a successful fetch operation do a sanity check to - // ensure we've got the object in our database to reset to. This - // can fail sometimes for corrupt repositories where the fetch - // operation succeeds but the object isn't actually there. 
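`GitRemote::checkout` above now prefers fetching into an existing database and only wipes the directory and starts over when that database is missing or cannot produce the wanted revision. The same reuse-or-recreate pattern, reduced to std filesystem calls; `try_fetch_existing` and `clone_fresh` are hypothetical stand-ins for the real libgit2 operations:

```rust
use std::fs;
use std::io;
use std::path::Path;

/// Try to satisfy the request from an existing database, and if that fails,
/// blow the directory away and start from an empty clone.
fn obtain_revision(
    db_path: &Path,
    try_fetch_existing: impl Fn(&Path) -> Option<String>,
    clone_fresh: impl Fn(&Path) -> io::Result<String>,
) -> io::Result<String> {
    if db_path.exists() {
        // Cheap path: the on-disk database already has (or can fetch) the rev.
        if let Some(rev) = try_fetch_existing(db_path) {
            return Ok(rev);
        }
        // Corrupt or unusable: remove it and re-clone from scratch.
        fs::remove_dir_all(db_path)?;
    }
    fs::create_dir_all(db_path)?;
    clone_fresh(db_path)
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir().join("cargo-clone-sketch");
    let rev = obtain_revision(
        &dir,
        |_path: &Path| None,                                // pretend the existing db is unusable
        |_path: &Path| Ok("0123456789abcdef".to_string()),  // pretend we cloned and resolved HEAD
    )?;
    println!("resolved revision {}", rev);
    fs::remove_dir_all(&dir)?;
    Ok(())
}
```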
+ // After a successful fetch operation the subsequent reset can + // fail sometimes for corrupt repositories where the fetch + // operation succeeds but the object isn't actually there in one + // way or another. In these situations just skip the error and + // try blowing away the whole repository and trying with a + // clone. co.fetch(cargo_config)?; - if co.has_object() { - co.reset(cargo_config)?; - assert!(co.is_fresh()); - checkout = Some(co); + match co.reset(cargo_config) { + Ok(()) => { + assert!(co.is_fresh()); + checkout = Some(co); + } + Err(e) => debug!("failed reset after fetch {:?}", e), } } else { checkout = Some(co); @@ -190,37 +178,46 @@ Ok(checkout) } - pub fn to_short_id(&self, revision: &GitRevision) -> CargoResult { - let obj = self.repo.find_object(revision.0, None)?; + pub fn to_short_id(&self, revision: git2::Oid) -> CargoResult { + let obj = self.repo.find_object(revision, None)?; Ok(GitShortID(obj.short_id()?)) } - pub fn has_ref(&self, reference: &str) -> CargoResult<()> { - self.repo.revparse_single(reference)?; - Ok(()) + pub fn contains(&self, oid: git2::Oid) -> bool { + self.repo.revparse_single(&oid.to_string()).is_ok() + } + + pub fn resolve(&self, r: &GitReference) -> CargoResult { + r.resolve(&self.repo) } } impl GitReference { - fn resolve(&self, repo: &git2::Repository) -> CargoResult { - let id = match *self { - GitReference::Tag(ref s) => (|| -> CargoResult { - let refname = format!("refs/tags/{}", s); + pub fn resolve(&self, repo: &git2::Repository) -> CargoResult { + let id = match self { + // Note that we resolve the named tag here in sync with where it's + // fetched into via `fetch` below. + GitReference::Tag(s) => (|| -> CargoResult { + let refname = format!("refs/remotes/origin/tags/{}", s); let id = repo.refname_to_id(&refname)?; let obj = repo.find_object(id, None)?; let obj = obj.peel(ObjectType::Commit)?; Ok(obj.id()) })() .chain_err(|| format!("failed to find tag `{}`", s))?, - GitReference::Branch(ref s) => { + + // Resolve the remote name since that's all we're configuring in + // `fetch` below. + GitReference::Branch(s) => { + let name = format!("origin/{}", s); let b = repo - .find_branch(s, git2::BranchType::Local) + .find_branch(&name, git2::BranchType::Remote) .chain_err(|| format!("failed to find branch `{}`", s))?; b.get() .target() .ok_or_else(|| anyhow::format_err!("branch `{}` did not have a target", s))? 
} - GitReference::Rev(ref s) => { + GitReference::Rev(s) => { let obj = repo.revparse_single(s)?; match obj.as_tag() { Some(tag) => tag.target_id(), @@ -228,7 +225,7 @@ } } }; - Ok(GitRevision(id)) + Ok(id) } } @@ -236,7 +233,7 @@ fn new( path: &Path, database: &'a GitDatabase, - revision: GitRevision, + revision: git2::Oid, repo: git2::Repository, ) -> GitCheckout<'a> { GitCheckout { @@ -250,7 +247,7 @@ fn clone_into( into: &Path, database: &'a GitDatabase, - revision: GitRevision, + revision: git2::Oid, config: &Config, ) -> CargoResult> { let dirname = into.parent().unwrap(); @@ -282,7 +279,6 @@ .clone_local(git2::build::CloneLocal::Local) .with_checkout(checkout) .fetch_options(fopts) - // .remote_create(|repo, _name, url| repo.remote_anonymous(url)) .clone(url.as_str(), into)?; repo = Some(r); Ok(()) @@ -296,7 +292,7 @@ fn is_fresh(&self) -> bool { match self.repo.revparse_single("HEAD") { - Ok(ref head) if head.id() == self.revision.0 => { + Ok(ref head) if head.id() == self.revision => { // See comments in reset() for why we check this self.location.join(".cargo-ok").exists() } @@ -307,15 +303,11 @@ fn fetch(&mut self, cargo_config: &Config) -> CargoResult<()> { info!("fetch {}", self.repo.path().display()); let url = self.database.path.into_url()?; - let refspec = "refs/heads/*:refs/heads/*"; - fetch(&mut self.repo, url.as_str(), refspec, cargo_config)?; + let reference = GitReference::Rev(self.revision.to_string()); + fetch(&mut self.repo, url.as_str(), &reference, cargo_config)?; Ok(()) } - fn has_object(&self) -> bool { - self.repo.find_object(self.revision.0, None).is_ok() - } - fn reset(&self, config: &Config) -> CargoResult<()> { // If we're interrupted while performing this reset (e.g., we die because // of a signal) Cargo needs to be sure to try to check out this repo @@ -328,9 +320,9 @@ let ok_file = self.location.join(".cargo-ok"); let _ = paths::remove_file(&ok_file); info!("reset {} to {}", self.repo.path().display(), self.revision); - let object = self.repo.find_object(self.revision.0, None)?; + let object = self.repo.find_object(self.revision, None)?; reset(&self.repo, &object, config)?; - File::create(ok_file)?; + paths::create(ok_file)?; Ok(()) } @@ -390,8 +382,11 @@ } }; // Fetch data from origin and reset to the head commit - let refspec = "refs/heads/*:refs/heads/*"; - fetch(&mut repo, url, refspec, cargo_config).chain_err(|| { + let reference = GitReference::Rev(head.to_string()); + cargo_config + .shell() + .status("Updating", format!("git submodule `{}`", url))?; + fetch(&mut repo, url, &reference, cargo_config).chain_err(|| { format!( "failed to fetch submodule `{}` from {}", child.name().unwrap_or(""), @@ -445,9 +440,14 @@ let mut ssh_agent_attempts = Vec::new(); let mut any_attempts = false; let mut tried_sshkey = false; + let mut url_attempt = None; + let orig_url = url; let mut res = f(&mut |url, username, allowed| { any_attempts = true; + if url != orig_url { + url_attempt = Some(url.to_string()); + } // libgit2's "USERNAME" authentication actually means that it's just // asking us for a username to keep going. This is currently only really // used for SSH authentication and isn't really an authentication type. @@ -579,18 +579,26 @@ } } } - - if res.is_ok() || !any_attempts { - return res.map_err(From::from); - } + let mut err = match res { + Ok(e) => return Ok(e), + Err(e) => e, + }; // In the case of an authentication failure (where we tried something) then // we try to give a more helpful error message about precisely what we // tried. 
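The hunk that follows rebuilds the authentication failure message from whatever was attempted (ssh-agent usernames, `credential.helper`, a redirected URL) and always ends with the `net.git-fetch-with-cli` hint. A std-only sketch of that message assembly, with inputs mirroring the locals tracked by `with_authentication`:

```rust
/// Compose the multi-line authentication error hint from the attempts made.
fn auth_failure_message(
    attempted_url: Option<&str>,
    ssh_usernames: &[&str],
    cred_helper_failed: Option<bool>,
) -> String {
    let mut msg = String::from("failed to authenticate when downloading repository");
    if let Some(url) = attempted_url {
        msg.push_str(": ");
        msg.push_str(url);
    }
    msg.push('\n');
    if !ssh_usernames.is_empty() {
        msg.push_str(&format!(
            "\n* attempted ssh-agent authentication, but no usernames succeeded: {}",
            ssh_usernames.join(", ")
        ));
    }
    match cred_helper_failed {
        Some(true) => msg.push_str(
            "\n* attempted to find username/password via git's `credential.helper` support, but failed",
        ),
        Some(false) => msg.push_str(
            "\n* attempted to find username/password via `credential.helper`, but maybe the found credentials were incorrect",
        ),
        None => {}
    }
    msg.push_str("\n\nif the git CLI succeeds then `net.git-fetch-with-cli` may help here\n");
    msg.push_str("https://doc.rust-lang.org/cargo/reference/config.html#netgit-fetch-with-cli");
    msg
}

fn main() {
    println!(
        "{}",
        auth_failure_message(Some("ssh://git@example.com/repo"), &["git"], Some(true))
    );
}
```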
- let res = res.map_err(anyhow::Error::from).chain_err(|| { + if any_attempts { let mut msg = "failed to authenticate when downloading \ repository" .to_string(); + + if let Some(attempt) = &url_attempt { + if url != attempt { + msg.push_str(": "); + msg.push_str(attempt); + } + } + msg.push_str("\n"); if !ssh_agent_attempts.is_empty() { let names = ssh_agent_attempts .iter() @@ -598,28 +606,56 @@ .collect::>() .join(", "); msg.push_str(&format!( - "\nattempted ssh-agent authentication, but \ - none of the usernames {} succeeded", + "\n* attempted ssh-agent authentication, but \ + no usernames succeeded: {}", names )); } if let Some(failed_cred_helper) = cred_helper_bad { if failed_cred_helper { msg.push_str( - "\nattempted to find username/password via \ + "\n* attempted to find username/password via \ git's `credential.helper` support, but failed", ); } else { msg.push_str( - "\nattempted to find username/password via \ + "\n* attempted to find username/password via \ `credential.helper`, but maybe the found \ credentials were incorrect", ); } } - msg - })?; - Ok(res) + msg.push_str("\n\n"); + msg.push_str("if the git CLI succeeds then `net.git-fetch-with-cli` may help here\n"); + msg.push_str("https://doc.rust-lang.org/cargo/reference/config.html#netgit-fetch-with-cli"); + err = err.context(msg); + + // Otherwise if we didn't even get to the authentication phase them we may + // have failed to set up a connection, in these cases hint on the + // `net.git-fetch-with-cli` configuration option. + } else if let Some(e) = err.downcast_ref::() { + match e.class() { + ErrorClass::Net + | ErrorClass::Ssl + | ErrorClass::Submodule + | ErrorClass::FetchHead + | ErrorClass::Ssh + | ErrorClass::Callback + | ErrorClass::Http => { + let mut msg = "network failure seems to have happened\n".to_string(); + msg.push_str( + "if a proxy or similar is necessary `net.git-fetch-with-cli` may help here\n", + ); + msg.push_str( + "https://doc.rust-lang.org/cargo/reference/config.html#netgit-fetch-with-cli", + ); + err = err.context(msg); + } + _ => {} + } + } + + Err(err) } fn reset(repo: &git2::Repository, obj: &git2::Object<'_>, config: &Config) -> CargoResult<()> { @@ -628,7 +664,9 @@ opts.progress(|_, cur, max| { drop(pb.tick(cur, max)); }); + debug!("doing reset"); repo.reset(obj, git2::ResetType::Hard, Some(&mut opts))?; + debug!("reset done"); Ok(()) } @@ -653,8 +691,7 @@ // Create a local anonymous remote in the repository to fetch the // url let mut opts = git2::FetchOptions::new(); - opts.remote_callbacks(rcb) - .download_tags(git2::AutotagOption::All); + opts.remote_callbacks(rcb); cb(opts) })?; Ok(()) @@ -664,7 +701,7 @@ pub fn fetch( repo: &mut git2::Repository, url: &str, - refspec: &str, + reference: &GitReference, config: &Config, ) -> CargoResult<()> { if config.frozen() { @@ -679,19 +716,10 @@ // If we're fetching from GitHub, attempt GitHub's special fast path for // testing if we've already got an up-to-date copy of the repository - - if let Ok(url) = Url::parse(url) { - if url.host_str() == Some("github.com") { - if let Ok(oid) = repo.refname_to_id("refs/remotes/origin/master") { - let mut handle = config.http()?.borrow_mut(); - debug!("attempting GitHub fast path for {}", url); - if github_up_to_date(&mut handle, &url, &oid) { - return Ok(()); - } else { - debug!("fast path failed, falling back to a git fetch"); - } - } - } + match github_up_to_date(repo, url, reference, config) { + Ok(true) => return Ok(()), + Ok(false) => {} + Err(e) => debug!("failed to check github {:?}", e), } // We 
reuse repositories quite a lot, so before we go through and update the @@ -700,6 +728,29 @@ // request we're about to issue. maybe_gc_repo(repo)?; + // Translate the reference desired here into an actual list of refspecs + // which need to get fetched. Additionally record if we're fetching tags. + let mut refspecs = Vec::new(); + let mut tags = false; + match reference { + // For branches and tags we can fetch simply one reference and copy it + // locally, no need to fetch other branches/tags. + GitReference::Branch(b) => { + refspecs.push(format!("refs/heads/{0}:refs/remotes/origin/{0}", b)); + } + GitReference::Tag(t) => { + refspecs.push(format!("refs/tags/{0}:refs/remotes/origin/tags/{0}", t)); + } + + // For `rev` dependencies we don't know what the rev will point to. To + // handle this situation we fetch all branches and tags, and then we + // pray it's somewhere in there. + GitReference::Rev(_) => { + refspecs.push(format!("refs/heads/*:refs/remotes/origin/*")); + tags = true; + } + } + // Unfortunately `libgit2` is notably lacking in the realm of authentication // when compared to the `git` command line. As a result, allow an escape // hatch for users that would prefer to use `git`-the-CLI for fetching @@ -707,12 +758,15 @@ // flavors of authentication possible while also still giving us all the // speed and portability of using `libgit2`. if let Some(true) = config.net_config()?.git_fetch_with_cli { - return fetch_with_cli(repo, url, refspec, config); + return fetch_with_cli(repo, url, &refspecs, tags, config); } debug!("doing a fetch for {}", url); let git_config = git2::Config::open_default()?; with_fetch_options(&git_config, url, config, &mut |mut opts| { + if tags { + opts.download_tags(git2::AutotagOption::All); + } // The `fetch` operation here may fail spuriously due to a corrupt // repository. It could also fail, however, for a whole slew of other // reasons (aka network related reasons). We want Cargo to automatically @@ -725,10 +779,10 @@ // blown away the repository, then we want to return the error as-is. let mut repo_reinitialized = false; loop { - debug!("initiating fetch of {} from {}", refspec, url); + debug!("initiating fetch of {:?} from {}", refspecs, url); let res = repo .remote_anonymous(url)? - .fetch(&[refspec], Some(&mut opts), None); + .fetch(&refspecs, Some(&mut opts), None); let err = match res { Ok(()) => break, Err(e) => e, @@ -755,16 +809,19 @@ fn fetch_with_cli( repo: &mut git2::Repository, url: &str, - refspec: &str, + refspecs: &[String], + tags: bool, config: &Config, ) -> CargoResult<()> { let mut cmd = process("git"); - cmd.arg("fetch") - .arg("--tags") // fetch all tags - .arg("--force") // handle force pushes + cmd.arg("fetch"); + if tags { + cmd.arg("--tags"); + } + cmd.arg("--force") // handle force pushes .arg("--update-head-ok") // see discussion in #2078 .arg(url) - .arg(refspec) + .args(refspecs) // If cargo is run by git (for example, the `exec` command in `git // rebase`), the GIT_DIR is set by git and will point to the wrong // location (this takes precedence over the cwd). Make sure this is @@ -884,9 +941,9 @@ /// made. /// /// This function will attempt to hit that fast path and verify that the `oid` -/// is actually the current `master` branch of the repository. If `true` is -/// returned then no update needs to be performed, but if `false` is returned -/// then the standard update logic still needs to happen. +/// is actually the current branch of the repository. 
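The refspec translation above is the core of the new fetch behavior: branches and tags fetch a single ref into `refs/remotes/origin/...`, while a pinned `rev` has to fetch all branches plus tags. The same mapping as a standalone function over a local copy of the `GitReference` enum:

```rust
/// Translate a requested reference into the refspecs to fetch, plus whether
/// tags should be downloaded as well.
enum GitReference {
    Branch(String),
    Tag(String),
    Rev(String),
}

fn refspecs_for(reference: &GitReference) -> (Vec<String>, bool) {
    let mut refspecs = Vec::new();
    let mut tags = false;
    match reference {
        GitReference::Branch(b) => {
            refspecs.push(format!("refs/heads/{0}:refs/remotes/origin/{0}", b));
        }
        GitReference::Tag(t) => {
            refspecs.push(format!("refs/tags/{0}:refs/remotes/origin/tags/{0}", t));
        }
        // The rev could live on any branch or tag, so fetch everything.
        GitReference::Rev(_) => {
            refspecs.push("refs/heads/*:refs/remotes/origin/*".to_string());
            tags = true;
        }
    }
    (refspecs, tags)
}

fn main() {
    let (specs, tags) = refspecs_for(&GitReference::Branch("main".into()));
    assert_eq!(specs, vec!["refs/heads/main:refs/remotes/origin/main"]);
    assert!(!tags);
    let (specs, _) = refspecs_for(&GitReference::Tag("v1.0.0".into()));
    assert_eq!(specs, vec!["refs/tags/v1.0.0:refs/remotes/origin/tags/v1.0.0"]);
    let (_, tags) = refspecs_for(&GitReference::Rev("0123abcd".into()));
    assert!(tags);
}
```

Fetching into `refs/remotes/origin/...` is also what lets the reworked `GitReference::resolve` earlier in this diff look branches and tags up by their remote-tracking names.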
If `true` is returned then +/// no update needs to be performed, but if `false` is returned then the +/// standard update logic still needs to happen. /// /// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference /// @@ -894,37 +951,62 @@ /// just a fast path. As a result all errors are ignored in this function and we /// just return a `bool`. Any real errors will be reported through the normal /// update path above. -fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool { - macro_rules! r#try { - ($e:expr) => { - match $e { - Some(e) => e, - None => return false, - } - }; +fn github_up_to_date( + repo: &mut git2::Repository, + url: &str, + reference: &GitReference, + config: &Config, +) -> CargoResult { + let url = Url::parse(url)?; + if url.host_str() != Some("github.com") { + return Ok(false); } + let github_branch_name = match reference { + GitReference::Branch(branch) => branch, + GitReference::Tag(tag) => tag, + GitReference::Rev(_) => { + debug!("can't use github fast path with `rev`"); + return Ok(false); + } + }; + // This expects GitHub urls in the form `github.com/user/repo` and nothing // else - let mut pieces = r#try!(url.path_segments()); - let username = r#try!(pieces.next()); - let repo = r#try!(pieces.next()); + let mut pieces = url + .path_segments() + .ok_or_else(|| anyhow!("no path segments on url"))?; + let username = pieces + .next() + .ok_or_else(|| anyhow!("couldn't find username"))?; + let repository = pieces + .next() + .ok_or_else(|| anyhow!("couldn't find repository name"))?; if pieces.next().is_some() { - return false; + anyhow::bail!("too many segments on URL"); } + // Trim off the `.git` from the repository, if present, since that's + // optional for GitHub and won't work when we try to use the API as well. + let repository = if repository.ends_with(".git") { + &repository[..repository.len() - 4] + } else { + repository + }; + let url = format!( - "https://api.github.com/repos/{}/{}/commits/master", - username, repo + "https://api.github.com/repos/{}/{}/commits/{}", + username, repository, github_branch_name, ); - r#try!(handle.get(true).ok()); - r#try!(handle.url(&url).ok()); - r#try!(handle.useragent("cargo").ok()); + let mut handle = config.http()?.borrow_mut(); + debug!("attempting GitHub fast path for {}", url); + handle.get(true)?; + handle.url(&url)?; + handle.useragent("cargo")?; let mut headers = List::new(); - r#try!(headers.append("Accept: application/vnd.github.3.sha").ok()); - r#try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok()); - r#try!(handle.http_headers(headers).ok()); - r#try!(handle.perform().ok()); - - r#try!(handle.response_code().ok()) == 304 + headers.append("Accept: application/vnd.github.3.sha")?; + headers.append(&format!("If-None-Match: \"{}\"", reference.resolve(repo)?))?; + handle.http_headers(headers)?; + handle.perform()?; + Ok(handle.response_code()? == 304) } diff -Nru cargo-0.44.1/src/cargo/sources/path.rs cargo-0.47.0/src/cargo/sources/path.rs --- cargo-0.44.1/src/cargo/sources/path.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/path.rs 2020-07-17 20:39:39.000000000 +0000 @@ -96,6 +96,15 @@ /// are relevant for building this package, but it also contains logic to /// use other methods like .gitignore to filter the list of files. 
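`github_up_to_date` above asks the GitHub commits API for the SHA of the requested branch or tag and treats an HTTP 304 (matching `If-None-Match`) as "already up to date". A sketch of how the request URL and headers are put together, with plain string handling standing in for the `url` and `curl` crates used in the real code:

```rust
/// Build the GitHub fast-path request, or return None if the URL is not a
/// plain `github.com/{user}/{repo}` repository.
fn github_fast_path_request(
    repo_url: &str,
    reference: &str,
    local_oid: &str,
) -> Option<(String, Vec<String>)> {
    let rest = repo_url.strip_prefix("https://github.com/")?;
    let mut pieces = rest.split('/');
    let username = pieces.next()?;
    let repository = pieces.next()?;
    if pieces.next().is_some() {
        return None;
    }
    // `.git` is optional when cloning but is not accepted by the API.
    let repository = repository.strip_suffix(".git").unwrap_or(repository);
    let api_url = format!(
        "https://api.github.com/repos/{}/{}/commits/{}",
        username, repository, reference
    );
    let headers = vec![
        "Accept: application/vnd.github.3.sha".to_string(),
        format!("If-None-Match: \"{}\"", local_oid),
    ];
    Some((api_url, headers))
}

fn main() {
    let (url, headers) = github_fast_path_request(
        "https://github.com/rust-lang/cargo.git",
        "master",
        "0123456789abcdef0123456789abcdef01234567",
    )
    .unwrap();
    assert_eq!(url, "https://api.github.com/repos/rust-lang/cargo/commits/master");
    println!("{}\n{}", url, headers.join("\n"));
}
```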
pub fn list_files(&self, pkg: &Package) -> CargoResult> { + self._list_files(pkg).chain_err(|| { + format!( + "failed to determine list of files in {}", + pkg.root().display() + ) + }) + } + + fn _list_files(&self, pkg: &Package) -> CargoResult> { let root = pkg.root(); let no_include_option = pkg.manifest().include().is_empty(); @@ -111,17 +120,21 @@ } let ignore_include = include_builder.build()?; - let ignore_should_package = |relative_path: &Path| -> CargoResult { + let ignore_should_package = |relative_path: &Path, is_dir: bool| -> CargoResult { // "Include" and "exclude" options are mutually exclusive. if no_include_option { - match ignore_exclude - .matched_path_or_any_parents(relative_path, /* is_dir */ false) - { + match ignore_exclude.matched_path_or_any_parents(relative_path, is_dir) { Match::None => Ok(true), Match::Ignore(_) => Ok(false), Match::Whitelist(_) => Ok(true), } } else { + if is_dir { + // Generally, include directives don't list every + // directory (nor should they!). Just skip all directory + // checks, and only check files. + return Ok(true); + } match ignore_include .matched_path_or_any_parents(relative_path, /* is_dir */ false) { @@ -132,7 +145,7 @@ } }; - let mut filter = |path: &Path| -> CargoResult { + let mut filter = |path: &Path, is_dir: bool| -> CargoResult { let relative_path = path.strip_prefix(root)?; let rel = relative_path.as_os_str(); @@ -142,13 +155,13 @@ return Ok(true); } - ignore_should_package(relative_path) + ignore_should_package(relative_path, is_dir) }; // Attempt Git-prepopulate only if no `include` (see rust-lang/cargo#4135). if no_include_option { - if let Some(result) = self.discover_git_and_list_files(pkg, root, &mut filter) { - return result; + if let Some(result) = self.discover_git_and_list_files(pkg, root, &mut filter)? { + return Ok(result); } // no include option and not git repo discovered (see rust-lang/cargo#7183). return self.list_files_walk_except_dot_files_and_dirs(pkg, &mut filter); @@ -162,50 +175,53 @@ &self, pkg: &Package, root: &Path, - filter: &mut dyn FnMut(&Path) -> CargoResult, - ) -> Option>> { - // If this package is in a Git repository, then we really do want to - // query the Git repository as it takes into account items such as - // `.gitignore`. We're not quite sure where the Git repository is, - // however, so we do a bit of a probe. - // - // We walk this package's path upwards and look for a sibling - // `Cargo.toml` and `.git` directory. If we find one then we assume that - // we're part of that repository. - let mut cur = root; - loop { - if cur.join("Cargo.toml").is_file() { - // If we find a Git repository next to this `Cargo.toml`, we still - // check to see if we are indeed part of the index. If not, then - // this is likely an unrelated Git repo, so keep going. - if let Ok(repo) = git2::Repository::open(cur) { - let index = match repo.index() { - Ok(index) => index, - Err(err) => return Some(Err(err.into())), - }; - let path = root.strip_prefix(cur).unwrap().join("Cargo.toml"); - if index.get_path(&path, 0).is_some() { - return Some(self.list_files_git(pkg, &repo, filter)); - } - } + filter: &mut dyn FnMut(&Path, bool) -> CargoResult, + ) -> CargoResult>> { + let repo = match git2::Repository::discover(root) { + Ok(repo) => repo, + Err(e) => { + log::debug!( + "could not discover git repo at or above {}: {}", + root.display(), + e + ); + return Ok(None); } - // Don't cross submodule boundaries. 
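The `ignore_should_package` closure above encodes cargo's packaging rules: `include` and `exclude` are mutually exclusive, and when `include` is used directories are never filtered out (only files are checked). Its control flow in isolation, with booleans standing in for the `ignore` crate's pattern matching:

```rust
/// Decide whether a path should be packaged, given which manifest option is
/// in effect and whether the gitignore-style patterns matched. The whitelist
/// subtleties of the real `Match` enum are glossed over here.
fn should_package(
    has_include_option: bool,
    is_dir: bool,
    matches_exclude: bool,
    matches_include: bool,
) -> bool {
    if !has_include_option {
        // No `include` in Cargo.toml: the `exclude` patterns decide.
        !matches_exclude
    } else if is_dir {
        // `include` lists files, not every intermediate directory, so
        // directories always pass and only files are checked.
        true
    } else {
        matches_include
    }
}

fn main() {
    // exclude-only manifest: an excluded file is dropped.
    assert!(!should_package(false, false, true, false));
    // include manifest: directories always pass so the walk can reach files.
    assert!(should_package(true, true, false, false));
    // include manifest: a file not listed is dropped.
    assert!(!should_package(true, false, false, false));
}
```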
- if cur.join(".git").is_dir() { - break; - } - match cur.parent() { - Some(parent) => cur = parent, - None => break, + }; + let index = repo + .index() + .chain_err(|| format!("failed to open git index at {}", repo.path().display()))?; + let repo_root = repo.workdir().ok_or_else(|| { + anyhow::format_err!( + "did not expect repo at {} to be bare", + repo.path().display() + ) + })?; + let repo_relative_path = match paths::strip_prefix_canonical(root, repo_root) { + Ok(p) => p, + Err(e) => { + log::warn!( + "cannot determine if path `{:?}` is in git repo `{:?}`: {:?}", + root, + repo_root, + e + ); + return Ok(None); } + }; + let manifest_path = repo_relative_path.join("Cargo.toml"); + if index.get_path(&manifest_path, 0).is_some() { + return Ok(Some(self.list_files_git(pkg, &repo, filter)?)); } - None + // Package Cargo.toml is not in git, don't use git to guide our selection. + Ok(None) } fn list_files_git( &self, pkg: &Package, repo: &git2::Repository, - filter: &mut dyn FnMut(&Path) -> CargoResult, + filter: &mut dyn FnMut(&Path, bool) -> CargoResult, ) -> CargoResult> { warn!("list_files_git {}", pkg.package_id()); let index = repo.index()?; @@ -289,7 +305,10 @@ continue; } - if is_dir.unwrap_or_else(|| file_path.is_dir()) { + // `is_dir` is None for symlinks. The `unwrap` checks if the + // symlink points to a directory. + let is_dir = is_dir.unwrap_or_else(|| file_path.is_dir()); + if is_dir { warn!(" found submodule {}", file_path.display()); let rel = file_path.strip_prefix(root)?; let rel = rel.to_str().ok_or_else(|| { @@ -307,7 +326,8 @@ PathSource::walk(&file_path, &mut ret, false, filter)?; } } - } else if (*filter)(&file_path)? { + } else if (*filter)(&file_path, is_dir)? { + assert!(!is_dir); // We found a file! warn!(" found {}", file_path.display()); ret.push(file_path); @@ -338,29 +358,28 @@ fn list_files_walk_except_dot_files_and_dirs( &self, pkg: &Package, - filter: &mut dyn FnMut(&Path) -> CargoResult, + filter: &mut dyn FnMut(&Path, bool) -> CargoResult, ) -> CargoResult> { let root = pkg.root(); let mut exclude_dot_files_dir_builder = GitignoreBuilder::new(root); exclude_dot_files_dir_builder.add_line(None, ".*")?; let ignore_dot_files_and_dirs = exclude_dot_files_dir_builder.build()?; - let mut filter_ignore_dot_files_and_dirs = |path: &Path| -> CargoResult { - let relative_path = path.strip_prefix(root)?; - match ignore_dot_files_and_dirs - .matched_path_or_any_parents(relative_path, /* is_dir */ false) - { - Match::Ignore(_) => Ok(false), - _ => filter(path), - } - }; + let mut filter_ignore_dot_files_and_dirs = + |path: &Path, is_dir: bool| -> CargoResult { + let relative_path = path.strip_prefix(root)?; + match ignore_dot_files_and_dirs.matched_path_or_any_parents(relative_path, is_dir) { + Match::Ignore(_) => Ok(false), + _ => filter(path, is_dir), + } + }; self.list_files_walk(pkg, &mut filter_ignore_dot_files_and_dirs) } fn list_files_walk( &self, pkg: &Package, - filter: &mut dyn FnMut(&Path) -> CargoResult, + filter: &mut dyn FnMut(&Path, bool) -> CargoResult, ) -> CargoResult> { let mut ret = Vec::new(); PathSource::walk(pkg.root(), &mut ret, true, filter)?; @@ -371,16 +390,18 @@ path: &Path, ret: &mut Vec, is_root: bool, - filter: &mut dyn FnMut(&Path) -> CargoResult, + filter: &mut dyn FnMut(&Path, bool) -> CargoResult, ) -> CargoResult<()> { - if !fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false) { - if (*filter)(path)? { - ret.push(path.to_path_buf()); - } + let is_dir = path.is_dir(); + if !is_root && !(*filter)(path, is_dir)? 
{ + return Ok(()); + } + if !is_dir { + ret.push(path.to_path_buf()); return Ok(()); } // Don't recurse into any sub-packages that we have. - if !is_root && fs::metadata(&path.join("Cargo.toml")).is_ok() { + if !is_root && path.join("Cargo.toml").exists() { return Ok(()); } @@ -415,7 +436,12 @@ let mut max = FileTime::zero(); let mut max_path = PathBuf::new(); - for file in self.list_files(pkg)? { + for file in self.list_files(pkg).chain_err(|| { + format!( + "failed to determine the most recently modified file in {}", + pkg.root().display() + ) + })? { // An `fs::stat` error here is either because path is a // broken symlink, a permissions error, or a race // condition where this path was `rm`-ed -- either way, diff -Nru cargo-0.44.1/src/cargo/sources/registry/index.rs cargo-0.47.0/src/cargo/sources/registry/index.rs --- cargo-0.44.1/src/cargo/sources/registry/index.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/registry/index.rs 2020-07-17 20:39:39.000000000 +0000 @@ -67,8 +67,9 @@ //! hopefully those are more obvious inline in the code itself. use crate::core::dependency::Dependency; -use crate::core::{InternedString, PackageId, SourceId, Summary}; +use crate::core::{PackageId, SourceId, Summary}; use crate::sources::registry::{RegistryData, RegistryPackage}; +use crate::util::interning::InternedString; use crate::util::paths; use crate::util::{internal, CargoResult, Config, Filesystem, ToSemver}; use log::info; @@ -336,7 +337,7 @@ // along the way produce helpful "did you mean?" suggestions. for path in UncanonicalizedIter::new(&raw_path).take(1024) { let summaries = Summaries::parse( - index_version.as_ref().map(|s| &**s), + index_version.as_deref(), root, &cache_root, path.as_ref(), @@ -722,7 +723,8 @@ .into_iter() .map(|dep| dep.into_dep(source_id)) .collect::>>()?; - let mut summary = Summary::new(pkgid, deps, &features, links, false)?; + let namespaced_features = false; + let mut summary = Summary::new(pkgid, deps, &features, links, namespaced_features)?; summary.set_checksum(cksum); Ok(IndexSummary { summary, diff -Nru cargo-0.44.1/src/cargo/sources/registry/local.rs cargo-0.47.0/src/cargo/sources/registry/local.rs --- cargo-0.44.1/src/cargo/sources/registry/local.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/registry/local.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,7 @@ -use crate::core::{InternedString, PackageId}; +use crate::core::PackageId; use crate::sources::registry::{MaybeLock, RegistryConfig, RegistryData}; use crate::util::errors::CargoResult; +use crate::util::interning::InternedString; use crate::util::paths; use crate::util::{Config, Filesystem, Sha256}; use std::fs::File; @@ -85,7 +86,7 @@ // crate files here never change in that we're not the one writing them, // so it's not our responsibility to synchronize access to them. let path = self.root.join(&crate_file).into_path_unlocked(); - let mut crate_file = File::open(&path)?; + let mut crate_file = paths::open(&path)?; // If we've already got an unpacked version of this crate, then skip the // checksum below as it is in theory already verified. 
diff -Nru cargo-0.44.1/src/cargo/sources/registry/mod.rs cargo-0.47.0/src/cargo/sources/registry/mod.rs --- cargo-0.44.1/src/cargo/sources/registry/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/registry/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -173,18 +173,21 @@ use crate::core::dependency::{DepKind, Dependency}; use crate::core::source::MaybePackage; -use crate::core::{InternedString, Package, PackageId, Source, SourceId, Summary}; +use crate::core::{Package, PackageId, Source, SourceId, Summary}; use crate::sources::PathSource; use crate::util::errors::CargoResultExt; use crate::util::hex; +use crate::util::interning::InternedString; use crate::util::into_url::IntoUrl; -use crate::util::{CargoResult, Config, Filesystem}; +use crate::util::{restricted_names, CargoResult, Config, Filesystem}; const PACKAGE_SOURCE_LOCK: &str = ".cargo-ok"; pub const CRATES_IO_INDEX: &str = "https://github.com/rust-lang/crates.io-index"; pub const CRATES_IO_REGISTRY: &str = "crates-io"; const CRATE_TEMPLATE: &str = "{crate}"; const VERSION_TEMPLATE: &str = "{version}"; +const PREFIX_TEMPLATE: &str = "{prefix}"; +const LOWER_PREFIX_TEMPLATE: &str = "{lowerprefix}"; pub struct RegistrySource<'cfg> { source_id: SourceId, @@ -203,10 +206,14 @@ /// The string is a template which will generate the download URL for the /// tarball of a specific version of a crate. The substrings `{crate}` and /// `{version}` will be replaced with the crate's name and version - /// respectively. + /// respectively. The substring `{prefix}` will be replaced with the + /// crate's prefix directory name, and the substring `{lowerprefix}` will + /// be replaced with the crate's prefix directory name converted to + /// lowercase. /// - /// For backwards compatibility, if the string does not contain `{crate}` or - /// `{version}`, it will be extended with `/{crate}/{version}/download` to + /// For backwards compatibility, if the string does not contain any + /// markers (`{crate}`, `{version}`, `{prefix}`, or ``{lowerprefix}`), it + /// will be extended with `/{crate}/{version}/download` to /// support registries like crates.io which were created before the /// templating setup was created. pub dl: String, @@ -217,6 +224,7 @@ pub api: Option, } +/// A single line in the index representing a single version of a package. #[derive(Deserialize)] pub struct RegistryPackage<'a> { name: InternedString, @@ -225,7 +233,15 @@ deps: Vec>, features: BTreeMap>, cksum: String, + /// If `true`, Cargo will skip this version when resolving. + /// + /// This was added in 2014. Everything in the crates.io index has this set + /// now, so this probably doesn't need to be an option anymore. yanked: Option, + /// Native library name this package links to. + /// + /// Added early 2018 (see https://github.com/rust-lang/cargo/pull/4978), + /// can be `None` if published before then. 
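The `dl` template documented above can now reference `{prefix}` and `{lowerprefix}`, and a marker-free value still gets the legacy `/{crate}/{version}/download` suffix appended. A sketch of the expansion, using the same prefix layout as the `make_crate_prefix` helper added later in this diff (the mirror URL in the example is made up):

```rust
/// Compute the index-style prefix directory for a crate name:
/// `1`, `2`, `3/{first letter}`, or `{first two}/{next two}`.
fn make_crate_prefix(name: &str) -> String {
    match name.len() {
        1 => "1".to_string(),
        2 => "2".to_string(),
        3 => format!("3/{}", &name[..1]),
        _ => format!("{}/{}", &name[0..2], &name[2..4]),
    }
}

/// Expand a registry `dl` template for one crate version.
fn expand_dl_template(mut template: String, name: &str, version: &str) -> String {
    let markers = ["{crate}", "{version}", "{prefix}", "{lowerprefix}"];
    if !markers.iter().any(|m| template.contains(m)) {
        // Backwards compatibility for registries configured before templating.
        template.push_str("/{crate}/{version}/download");
    }
    let prefix = make_crate_prefix(name);
    template
        .replace("{crate}", name)
        .replace("{version}", version)
        .replace("{prefix}", &prefix)
        .replace("{lowerprefix}", &prefix.to_lowercase())
}

fn main() {
    assert_eq!(
        expand_dl_template("https://crates.example/api/v1/crates".to_string(), "serde", "1.0.0"),
        "https://crates.example/api/v1/crates/serde/1.0.0/download"
    );
    assert_eq!(
        expand_dl_template(
            "https://mirror.example/{prefix}/{crate}-{version}.crate".to_string(),
            "serde",
            "1.0.0"
        ),
        "https://mirror.example/se/rd/serde-1.0.0.crate"
    );
}
```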
links: Option, } @@ -315,7 +331,7 @@ if package.is_some() { dep.set_explicit_name_in_toml(name); } - let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") { + let kind = match kind.as_deref().unwrap_or("") { "dev" => DepKind::Development, "build" => DepKind::Build, _ => DepKind::Normal, @@ -459,11 +475,11 @@ .create(true) .read(true) .write(true) - .open(&path)?; + .open(&path) + .chain_err(|| format!("failed to open `{}`", path.display()))?; let gz = GzDecoder::new(tarball); let mut tar = Archive::new(gz); - tar.set_preserve_mtime(false); let prefix = unpack_dir.file_name().unwrap(); let parent = unpack_dir.parent().unwrap(); for entry in tar.entries()? { @@ -487,11 +503,18 @@ prefix ) } - - // Once that's verified, unpack the entry as usual. - entry - .unpack_in(parent) - .chain_err(|| format!("failed to unpack entry at `{}`", entry_path.display()))?; + // Unpacking failed + let mut result = entry.unpack_in(parent).map_err(anyhow::Error::from); + if cfg!(windows) && restricted_names::is_windows_reserved_path(&entry_path) { + result = result.chain_err(|| { + format!( + "`{}` appears to contain a reserved Windows path, \ + it cannot be extracted on Windows", + entry_path.display() + ) + }); + } + result.chain_err(|| format!("failed to unpack entry at `{}`", entry_path.display()))?; } // Write to the lock file to indicate that unpacking was successful. diff -Nru cargo-0.44.1/src/cargo/sources/registry/remote.rs cargo-0.47.0/src/cargo/sources/registry/remote.rs --- cargo-0.44.1/src/cargo/sources/registry/remote.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/sources/registry/remote.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,25 +1,39 @@ -use crate::core::{InternedString, PackageId, SourceId}; +use crate::core::{GitReference, PackageId, SourceId}; use crate::sources::git; use crate::sources::registry::MaybeLock; -use crate::sources::registry::{RegistryConfig, RegistryData, CRATE_TEMPLATE, VERSION_TEMPLATE}; +use crate::sources::registry::{ + RegistryConfig, RegistryData, CRATE_TEMPLATE, LOWER_PREFIX_TEMPLATE, PREFIX_TEMPLATE, + VERSION_TEMPLATE, +}; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::interning::InternedString; use crate::util::paths; use crate::util::{Config, Filesystem, Sha256}; use lazycell::LazyCell; use log::{debug, trace}; use std::cell::{Cell, Ref, RefCell}; use std::fmt::Write as FmtWrite; -use std::fs::{File, OpenOptions}; +use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::SeekFrom; use std::mem; use std::path::Path; use std::str; +fn make_crate_prefix(name: &str) -> String { + match name.len() { + 1 => format!("1"), + 2 => format!("2"), + 3 => format!("3/{}", &name[..1]), + _ => format!("{}/{}", &name[0..2], &name[2..4]), + } +} + pub struct RemoteRegistry<'cfg> { index_path: Filesystem, cache_path: Filesystem, source_id: SourceId, + index_git_ref: GitReference, config: &'cfg Config, tree: RefCell>>, repo: LazyCell, @@ -34,6 +48,8 @@ cache_path: config.registry_cache_path().join(name), source_id, config, + // TODO: we should probably make this configurable + index_git_ref: GitReference::Branch("master".to_string()), tree: RefCell::new(None), repo: LazyCell::new(), head: Cell::new(None), @@ -85,7 +101,8 @@ fn head(&self) -> CargoResult { if self.head.get().is_none() { - let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?; + let repo = self.repo()?; + let oid = self.index_git_ref.resolve(repo)?; self.head.set(Some(oid)); } Ok(self.head.get().unwrap()) @@ -215,17 +232,17 @@ 
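The unpacking hunk above adds a clearer error when an archive entry uses a reserved Windows device name, via `restricted_names::is_windows_reserved_path`. A hypothetical reimplementation of that kind of check, for illustration only (cargo's actual helper may differ in detail):

```rust
use std::path::Path;

/// Reserved DOS device names cannot be created as files on Windows, even with
/// an extension (`nul.rs` is still reserved), so compare the stem only.
fn is_windows_reserved(name: &str) -> bool {
    let stem = name.split('.').next().unwrap_or("").to_ascii_uppercase();
    matches!(
        stem.as_str(),
        "CON" | "PRN" | "AUX" | "NUL"
            | "COM1" | "COM2" | "COM3" | "COM4" | "COM5" | "COM6" | "COM7" | "COM8" | "COM9"
            | "LPT1" | "LPT2" | "LPT3" | "LPT4" | "LPT5" | "LPT6" | "LPT7" | "LPT8" | "LPT9"
    )
}

/// A path is rejected if any of its components is a reserved name.
fn is_windows_reserved_path(path: &Path) -> bool {
    path.iter()
        .filter_map(|component| component.to_str())
        .any(is_windows_reserved)
}

fn main() {
    assert!(is_windows_reserved_path(Path::new("some-crate-1.0.0/src/aux.rs")));
    assert!(!is_windows_reserved_path(Path::new("some-crate-1.0.0/src/lib.rs")));
}
```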
.shell() .status("Updating", self.source_id.display_index())?; - // git fetch origin master + // Fetch the latest version of our `index_git_ref` into the index + // checkout. let url = self.source_id.url(); - let refspec = "refs/heads/master:refs/remotes/origin/master"; let repo = self.repo.borrow_mut().unwrap(); - git::fetch(repo, url.as_str(), refspec, self.config) + git::fetch(repo, url.as_str(), &self.index_git_ref, self.config) .chain_err(|| format!("failed to fetch `{}`", url))?; self.config.updated_sources().insert(self.source_id); // Create a dummy file to record the mtime for when we updated the // index. - File::create(&path.join(LAST_UPDATED_FILE))?; + paths::create(&path.join(LAST_UPDATED_FILE))?; Ok(()) } @@ -250,12 +267,19 @@ let config = self.config()?.unwrap(); let mut url = config.dl; - if !url.contains(CRATE_TEMPLATE) && !url.contains(VERSION_TEMPLATE) { + if !url.contains(CRATE_TEMPLATE) + && !url.contains(VERSION_TEMPLATE) + && !url.contains(PREFIX_TEMPLATE) + && !url.contains(LOWER_PREFIX_TEMPLATE) + { write!(url, "/{}/{}/download", CRATE_TEMPLATE, VERSION_TEMPLATE).unwrap(); } + let prefix = make_crate_prefix(&*pkg.name()); let url = url .replace(CRATE_TEMPLATE, &*pkg.name()) - .replace(VERSION_TEMPLATE, &pkg.version().to_string()); + .replace(VERSION_TEMPLATE, &pkg.version().to_string()) + .replace(PREFIX_TEMPLATE, &prefix) + .replace(LOWER_PREFIX_TEMPLATE, &prefix.to_lowercase()); Ok(MaybeLock::Download { url, @@ -283,7 +307,8 @@ .create(true) .read(true) .write(true) - .open(&path)?; + .open(&path) + .chain_err(|| format!("failed to open `{}`", path.display()))?; let meta = dst.metadata()?; if meta.len() > 0 { return Ok(dst); @@ -300,10 +325,8 @@ let path = self.cache_path.join(path); let path = self.config.assert_package_cache_locked(&path); - if let Ok(dst) = File::open(path) { - if let Ok(meta) = dst.metadata() { - return meta.len() > 0; - } + if let Ok(meta) = fs::metadata(path) { + return meta.len() > 0; } false } @@ -315,3 +338,18 @@ self.tree.borrow_mut().take(); } } + +#[cfg(test)] +mod tests { + use super::make_crate_prefix; + + #[test] + fn crate_prefix() { + assert_eq!(make_crate_prefix("a"), "1"); + assert_eq!(make_crate_prefix("ab"), "2"); + assert_eq!(make_crate_prefix("abc"), "3/a"); + assert_eq!(make_crate_prefix("Abc"), "3/A"); + assert_eq!(make_crate_prefix("AbCd"), "Ab/Cd"); + assert_eq!(make_crate_prefix("aBcDe"), "aB/cD"); + } +} diff -Nru cargo-0.44.1/src/cargo/util/command_prelude.rs cargo-0.47.0/src/cargo/util/command_prelude.rs --- cargo-0.44.1/src/cargo/util/command_prelude.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/command_prelude.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,9 +1,9 @@ use crate::core::compiler::{BuildConfig, MessageFormat}; -use crate::core::InternedString; use crate::core::Workspace; use crate::ops::{CompileFilter, CompileOptions, NewOptions, Packages, VersionControl}; use crate::sources::CRATES_IO_REGISTRY; use crate::util::important_paths::find_root_manifest_for_wd; +use crate::util::interning::InternedString; use crate::util::{paths, toml::TomlProfile, validate_package_name}; use crate::util::{ print_available_benches, print_available_binaries, print_available_examples, @@ -13,7 +13,6 @@ use anyhow::bail; use clap::{self, SubCommand}; use std::ffi::{OsStr, OsString}; -use std::fs; use std::path::PathBuf; pub use crate::core::compiler::CompileMode; @@ -37,6 +36,20 @@ ._arg(multi_opt("exclude", "SPEC", exclude)) } + /// Variant of arg_package_spec that does not include the `--all` flag + /// 
(but does include `--workspace`). Used to avoid confusion with + /// historical uses of `--all`. + fn arg_package_spec_no_all( + self, + package: &'static str, + all: &'static str, + exclude: &'static str, + ) -> Self { + self.arg_package_spec_simple(package) + ._arg(opt("workspace", all)) + ._arg(multi_opt("exclude", "SPEC", exclude)) + } + fn arg_package_spec_simple(self, package: &'static str) -> Self { self._arg(multi_opt("package", "SPEC", package).short("p")) } @@ -104,7 +117,7 @@ self._arg(multi_opt( "features", "FEATURES", - "Space-separated list of features to activate", + "Space or comma separated list of features to activate", )) ._arg(opt("all-features", "Activate all available features")) ._arg(opt( @@ -126,7 +139,7 @@ } fn arg_target_triple(self, target: &'static str) -> Self { - self._arg(opt("target", target).value_name("TRIPLE")) + self._arg(multi_opt("target", target, "TRIPLE")) } fn arg_target_dir(self) -> Self { @@ -150,6 +163,10 @@ )) } + fn arg_unit_graph(self) -> Self { + self._arg(opt("unit-graph", "Output build graph in JSON (unstable)").hidden(true)) + } + fn arg_new_opts(self) -> Self { self._arg( opt( @@ -267,7 +284,7 @@ if !path.ends_with("Cargo.toml") { anyhow::bail!("the manifest-path must be a path to a Cargo.toml file") } - if fs::metadata(&path).is_err() { + if !path.exists() { anyhow::bail!( "manifest path `{}` does not exist", self._value_of("manifest-path").unwrap() @@ -284,7 +301,7 @@ if config.cli_unstable().avoid_dev_deps { ws.set_require_optional_deps(false); } - if ws.is_virtual() && !config.cli_unstable().package_features { + if ws.is_virtual() && !ws.allows_unstable_package_features() { // --all-features is actually honored. In general, workspaces and // feature flags are a bit of a mess right now. for flag in &["features", "no-default-features"] { @@ -304,8 +321,8 @@ self.value_of_u32("jobs") } - fn target(&self) -> Option { - self._value_of("target").map(|s| s.to_string()) + fn targets(&self) -> Vec { + self._values_of("target") } fn get_profile_name( @@ -358,20 +375,23 @@ } } - fn compile_options<'a>( - &self, - config: &'a Config, - mode: CompileMode, - workspace: Option<&Workspace<'a>>, - profile_checking: ProfileChecking, - ) -> CargoResult> { - let spec = Packages::from_flags( + fn packages_from_flags(&self) -> CargoResult { + Packages::from_flags( // TODO Integrate into 'workspace' self._is_present("workspace") || self._is_present("all"), self._values_of("exclude"), self._values_of("package"), - )?; + ) + } + fn compile_options( + &self, + config: &Config, + mode: CompileMode, + workspace: Option<&Workspace<'_>>, + profile_checking: ProfileChecking, + ) -> CargoResult { + let spec = self.packages_from_flags()?; let mut message_format = None; let default_json = MessageFormat::Json { short: false, @@ -434,18 +454,23 @@ } } - let mut build_config = BuildConfig::new(config, self.jobs()?, &self.target(), mode)?; + let mut build_config = BuildConfig::new(config, self.jobs()?, &self.targets(), mode)?; build_config.message_format = message_format.unwrap_or(MessageFormat::Human); build_config.requested_profile = self.get_profile_name(config, "dev", profile_checking)?; build_config.build_plan = self._is_present("build-plan"); + build_config.unit_graph = self._is_present("unit-graph"); if build_config.build_plan { config .cli_unstable() .fail_if_stable_opt("--build-plan", 5579)?; }; + if build_config.unit_graph { + config + .cli_unstable() + .fail_if_stable_opt("--unit-graph", 8002)?; + } let opts = CompileOptions { - config, build_config, features: 
self._values_of("features"), all_features: self._is_present("all-features"), @@ -467,7 +492,6 @@ target_rustc_args: None, local_rustdoc_args: None, rustdoc_document_private_items: false, - export_dir: None, }; if let Some(ws) = workspace { @@ -477,13 +501,13 @@ Ok(opts) } - fn compile_options_for_single_package<'a>( + fn compile_options_for_single_package( &self, - config: &'a Config, + config: &Config, mode: CompileMode, - workspace: Option<&Workspace<'a>>, + workspace: Option<&Workspace<'_>>, profile_checking: ProfileChecking, - ) -> CargoResult> { + ) -> CargoResult { let mut compile_opts = self.compile_options(config, mode, workspace, profile_checking)?; compile_opts.spec = Packages::Packages(self._values_of("package")); Ok(compile_opts) @@ -556,7 +580,7 @@ fn check_optional_opts( &self, workspace: &Workspace<'_>, - compile_opts: &CompileOptions<'_>, + compile_opts: &CompileOptions, ) -> CargoResult<()> { if self.is_present_with_zero_values("example") { print_available_examples(workspace, compile_opts)?; diff -Nru cargo-0.44.1/src/cargo/util/config/de.rs cargo-0.47.0/src/cargo/util/config/de.rs --- cargo-0.44.1/src/cargo/util/config/de.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/config/de.rs 2020-07-17 20:39:39.000000000 +0000 @@ -195,11 +195,31 @@ } } + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + let value = self + .config + .get_string_priv(&self.key)? + .ok_or_else(|| ConfigError::missing(&self.key))?; + + let Value { val, definition } = value; + visitor + .visit_enum(val.into_deserializer()) + .map_err(|e: ConfigError| e.with_key_context(&self.key, definition)) + } + // These aren't really supported, yet. serde::forward_to_deserialize_any! { f32 f64 char str bytes byte_buf unit unit_struct - enum identifier ignored_any + identifier ignored_any } } diff -Nru cargo-0.44.1/src/cargo/util/config/mod.rs cargo-0.47.0/src/cargo/util/config/mod.rs --- cargo-0.44.1/src/cargo/util/config/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/config/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -70,6 +70,7 @@ use url::Url; use self::ConfigValue as CV; +use crate::core::compiler::rustdoc::RustdocExternMap; use crate::core::shell::Verbosity; use crate::core::{nightly_features_allowed, CliUnstable, Shell, SourceId, Workspace}; use crate::ops; @@ -121,7 +122,7 @@ /// relating to cargo itself. #[derive(Debug)] pub struct Config { - /// The location of the user's 'home' directory. OS-dependent. + /// The location of the user's Cargo home directory. OS-dependent. 
home_path: Filesystem, /// Information about how to write messages to the shell shell: RefCell, @@ -172,6 +173,7 @@ net_config: LazyCell, build_config: LazyCell, target_cfgs: LazyCell>, + doc_extern_map: LazyCell, } impl Config { @@ -241,6 +243,7 @@ net_config: LazyCell::new(), build_config: LazyCell::new(), target_cfgs: LazyCell::new(), + doc_extern_map: LazyCell::new(), } } @@ -314,9 +317,19 @@ .into_path_unlocked() }); let wrapper = self.maybe_get_tool("rustc_wrapper", &self.build_config()?.rustc_wrapper); + let rustc_workspace_wrapper = self.maybe_get_tool( + "rustc_workspace_wrapper", + &self.build_config()?.rustc_workspace_wrapper, + ); + + if !self.cli_unstable().unstable_options && rustc_workspace_wrapper.is_some() { + bail!("Usage of `RUSTC_WORKSPACE_WRAPPER` requires `-Z unstable-options`") + } + Rustc::new( self.get_tool("rustc", &self.build_config()?.rustc), wrapper, + rustc_workspace_wrapper, &self .home() .join("bin") @@ -693,7 +706,7 @@ // Ignore errors in the configuration files. let term = self.get::("term").unwrap_or_default(); - let color = color.or_else(|| term.color.as_ref().map(|s| s.as_ref())); + let color = color.or_else(|| term.color.as_deref()); let verbosity = match (verbose, term.verbose, quiet) { (true, _, false) | (_, Some(true), false) => Verbosity::Verbose, @@ -939,8 +952,8 @@ let possible = dir.join(filename_without_extension); let possible_with_extension = dir.join(format!("{}.toml", filename_without_extension)); - if fs::metadata(&possible).is_ok() { - if warn && fs::metadata(&possible_with_extension).is_ok() { + if possible.exists() { + if warn && possible_with_extension.exists() { // We don't want to print a warning if the version // without the extension is just a symlink to the version // WITH an extension, which people may want to do to @@ -963,7 +976,7 @@ } Ok(Some(possible)) - } else if fs::metadata(&possible_with_extension).is_ok() { + } else if possible_with_extension.exists() { Ok(Some(possible_with_extension)) } else { Ok(None) @@ -998,23 +1011,31 @@ /// Gets the index for a registry. pub fn get_registry_index(&self, registry: &str) -> CargoResult { validate_package_name(registry, "registry name", "")?; - Ok( - match self.get_string(&format!("registries.{}.index", registry))? { - Some(index) => self.resolve_registry_index(index)?, - None => bail!("No index found for registry: `{}`", registry), - }, - ) + if let Some(index) = self.get_string(&format!("registries.{}.index", registry))? { + self.resolve_registry_index(&index).chain_err(|| { + format!( + "invalid index URL for registry `{}` defined in {}", + registry, index.definition + ) + }) + } else { + bail!("no index found for registry: `{}`", registry); + } } - /// Gets the index for the default registry. - pub fn get_default_registry_index(&self) -> CargoResult> { - Ok(match self.get_string("registry.index")? { - Some(index) => Some(self.resolve_registry_index(index)?), - None => None, - }) + /// Returns an error if `registry.index` is set. + pub fn check_registry_index_not_set(&self) -> CargoResult<()> { + if self.get_string("registry.index")?.is_some() { + bail!( + "the `registry.index` config value is no longer supported\n\ + Use `[source]` replacement to alter the default index for crates.io." + ); + } + Ok(()) } - fn resolve_registry_index(&self, index: Value) -> CargoResult { + fn resolve_registry_index(&self, index: &Value) -> CargoResult { + // This handles relative file: URLs, relative to the config definition. 
let base = index .definition .root(self) @@ -1023,7 +1044,7 @@ let _parsed = index.val.into_url()?; let url = index.val.into_url_with_base(Some(&*base))?; if url.password().is_some() { - bail!("Registry URLs may not contain passwords"); + bail!("registry URLs may not contain passwords"); } Ok(url) } @@ -1141,6 +1162,14 @@ .try_borrow_with(|| target::load_target_cfgs(self)) } + pub fn doc_extern_map(&self) -> CargoResult<&RustdocExternMap> { + // Note: This does not support environment variables. The `Unit` + // fundamentally does not have access to the registry name, so there is + // nothing to query. Plumbing the name into SourceId is quite challenging. + self.doc_extern_map + .try_borrow_with(|| self.get::("doc.extern-map")) + } + /// Returns the `[target]` table definition for the given target triple. pub fn target_cfg_triple(&self, target: &str) -> CargoResult { target::load_target_triple(self, target) @@ -1423,12 +1452,10 @@ fn merge(&mut self, from: ConfigValue, force: bool) -> CargoResult<()> { match (self, from) { (&mut CV::List(ref mut old, _), CV::List(ref mut new, _)) => { - let new = mem::replace(new, Vec::new()); - old.extend(new.into_iter()); + old.extend(mem::take(new).into_iter()); } (&mut CV::Table(ref mut old, _), CV::Table(ref mut new, _)) => { - let new = mem::replace(new, HashMap::new()); - for (key, value) in new { + for (key, value) in mem::take(new) { match old.entry(key.clone()) { Occupied(mut entry) => { let new_def = value.definition().clone(); @@ -1609,9 +1636,11 @@ let contents = toml.to_string(); file.seek(SeekFrom::Start(0))?; - file.write_all(contents.as_bytes())?; + file.write_all(contents.as_bytes()) + .chain_err(|| format!("failed to write to `{}`", file.path().display()))?; file.file().set_len(contents.len() as u64)?; - set_permissions(file.file(), 0o600)?; + set_permissions(file.file(), 0o600) + .chain_err(|| format!("failed to set permissions of `{}`", file.path().display()))?; return Ok(()); @@ -1645,15 +1674,6 @@ } } -/// returns path to clippy-driver binary -/// -/// Allows override of the path via `CARGO_CLIPPY_DRIVER` env variable -pub fn clippy_driver() -> PathBuf { - env::var("CARGO_CLIPPY_DRIVER") - .unwrap_or_else(|_| "clippy-driver".into()) - .into() -} - #[derive(Debug, Default, Deserialize, PartialEq)] #[serde(rename_all = "kebab-case")] pub struct CargoHttpConfig { @@ -1714,6 +1734,7 @@ pub rustflags: Option, pub rustdocflags: Option, pub rustc_wrapper: Option, + pub rustc_workspace_wrapper: Option, pub rustc: Option, pub rustdoc: Option, pub out_dir: Option, @@ -1737,3 +1758,45 @@ &self.0 } } + +#[macro_export] +macro_rules! __shell_print { + ($config:expr, $which:ident, $newline:literal, $($arg:tt)*) => ({ + let mut shell = $config.shell(); + let out = shell.$which(); + drop(out.write_fmt(format_args!($($arg)*))); + if $newline { + drop(out.write_all(b"\n")); + } + }); +} + +#[macro_export] +macro_rules! drop_println { + ($config:expr) => ( $crate::drop_print!($config, "\n") ); + ($config:expr, $($arg:tt)*) => ( + $crate::__shell_print!($config, out, true, $($arg)*) + ); +} + +#[macro_export] +macro_rules! drop_eprintln { + ($config:expr) => ( $crate::drop_eprint!($config, "\n") ); + ($config:expr, $($arg:tt)*) => ( + $crate::__shell_print!($config, err, true, $($arg)*) + ); +} + +#[macro_export] +macro_rules! drop_print { + ($config:expr, $($arg:tt)*) => ( + $crate::__shell_print!($config, out, false, $($arg)*) + ); +} + +#[macro_export] +macro_rules! 
drop_eprint { + ($config:expr, $($arg:tt)*) => ( + $crate::__shell_print!($config, err, false, $($arg)*) + ); +} diff -Nru cargo-0.44.1/src/cargo/util/config/target.rs cargo-0.47.0/src/cargo/util/config/target.rs --- cargo-0.44.1/src/cargo/util/config/target.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/config/target.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,7 +5,7 @@ use std::collections::{BTreeMap, HashMap}; use std::path::PathBuf; -/// Config definition of a [target.'cfg(…)'] table. +/// Config definition of a `[target.'cfg(…)']` table. /// /// This is a subset of `TargetConfig`. #[derive(Debug, Deserialize)] @@ -19,7 +19,7 @@ pub other: BTreeMap, } -/// Config definition of a [target] table. +/// Config definition of a `[target]` table. #[derive(Debug)] pub struct TargetConfig { /// Process to run as a wrapper for `cargo run`, `test`, and `bench` commands. diff -Nru cargo-0.44.1/src/cargo/util/cpu.rs cargo-0.47.0/src/cargo/util/cpu.rs --- cargo-0.44.1/src/cargo/util/cpu.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/cpu.rs 2020-07-17 20:39:39.000000000 +0000 @@ -26,8 +26,7 @@ #[cfg(target_os = "linux")] mod imp { - use std::fs::File; - use std::io::{self, Read}; + use std::{fs, io}; pub struct State { user: u64, @@ -43,8 +42,7 @@ } pub fn current() -> io::Result { - let mut state = String::new(); - File::open("/proc/stat")?.read_to_string(&mut state)?; + let state = fs::read_to_string("/proc/stat")?; (|| { let mut parts = state.lines().next()?.split_whitespace(); diff -Nru cargo-0.44.1/src/cargo/util/dependency_queue.rs cargo-0.47.0/src/cargo/util/dependency_queue.rs --- cargo-0.44.1/src/cargo/util/dependency_queue.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/dependency_queue.rs 2020-07-17 20:39:39.000000000 +0000 @@ -53,7 +53,7 @@ } impl DependencyQueue { - /// Adds a new ndoe and its dependencies to this queue. + /// Adds a new node and its dependencies to this queue. /// /// The `key` specified is a new node in the dependency graph, and the node /// depend on all the dependencies iterated by `dependencies`. Each diff -Nru cargo-0.44.1/src/cargo/util/errors.rs cargo-0.47.0/src/cargo/util/errors.rs --- cargo-0.44.1/src/cargo/util/errors.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/errors.rs 2020-07-17 20:39:39.000000000 +0000 @@ -259,7 +259,7 @@ } } - pub fn hint(&self, ws: &Workspace<'_>, opts: &CompileOptions<'_>) -> String { + pub fn hint(&self, ws: &Workspace<'_>, opts: &CompileOptions) -> String { match self.test { Test::UnitTest { ref kind, diff -Nru cargo-0.44.1/src/cargo/util/flock.rs cargo-0.47.0/src/cargo/util/flock.rs --- cargo-0.44.1/src/cargo/util/flock.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/flock.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,14 +3,12 @@ use std::io::{Read, Seek, SeekFrom, Write}; use std::path::{Display, Path, PathBuf}; -use fs2::{lock_contended_error, FileExt}; use termcolor::Color::Cyan; -#[cfg(windows)] -use winapi::shared::winerror::ERROR_INVALID_FUNCTION; use crate::util::errors::{CargoResult, CargoResultExt}; use crate::util::paths; use crate::util::Config; +use sys::*; #[derive(Debug)] pub struct FileLock { @@ -95,7 +93,7 @@ fn drop(&mut self) { if self.state != State::Unlocked { if let Some(f) = self.f.take() { - let _ = f.unlock(); + let _ = unlock(&f); } } } @@ -150,8 +148,7 @@ /// Handles errors where other Cargo processes are also attempting to /// concurrently create this directory. 
pub fn create_dir(&self) -> CargoResult<()> { - paths::create_dir_all(&self.root)?; - Ok(()) + paths::create_dir_all(&self.root) } /// Returns an adaptor that can be used to print the path of this @@ -231,13 +228,13 @@ .chain_err(|| format!("failed to open: {}", path.display()))?; match state { State::Exclusive => { - acquire(config, msg, &path, &|| f.try_lock_exclusive(), &|| { - f.lock_exclusive() + acquire(config, msg, &path, &|| try_lock_exclusive(&f), &|| { + lock_exclusive(&f) })?; } State::Shared => { - acquire(config, msg, &path, &|| f.try_lock_shared(), &|| { - f.lock_shared() + acquire(config, msg, &path, &|| try_lock_shared(&f), &|| { + lock_shared(&f) })?; } State::Unlocked => {} @@ -281,8 +278,8 @@ config: &Config, msg: &str, path: &Path, - r#try: &dyn Fn() -> io::Result<()>, - block: &dyn Fn() -> io::Result<()>, + lock_try: &dyn Fn() -> io::Result<()>, + lock_block: &dyn Fn() -> io::Result<()>, ) -> CargoResult<()> { // File locking on Unix is currently implemented via `flock`, which is known // to be broken on NFS. We could in theory just ignore errors that happen on @@ -298,34 +295,26 @@ return Ok(()); } - match r#try() { + match lock_try() { Ok(()) => return Ok(()), // In addition to ignoring NFS which is commonly not working we also // just ignore locking on filesystems that look like they don't - // implement file locking. We detect that here via the return value of - // locking (e.g., inspecting errno). - #[cfg(unix)] - Err(ref e) if e.raw_os_error() == Some(libc::ENOTSUP) => return Ok(()), - - #[cfg(target_os = "linux")] - Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => return Ok(()), - - #[cfg(windows)] - Err(ref e) if e.raw_os_error() == Some(ERROR_INVALID_FUNCTION as i32) => return Ok(()), + // implement file locking. 
+ Err(e) if error_unsupported(&e) => return Ok(()), Err(e) => { - if e.raw_os_error() != lock_contended_error().raw_os_error() { + if !error_contended(&e) { let e = anyhow::Error::from(e); let cx = format!("failed to lock file: {}", path.display()); - return Err(e.context(cx).into()); + return Err(e.context(cx)); } } } let msg = format!("waiting for file lock on {}", msg); config.shell().status_with_color("Blocking", &msg, Cyan)?; - block().chain_err(|| format!("failed to lock file: {}", path.display()))?; + lock_block().chain_err(|| format!("failed to lock file: {}", path.display()))?; return Ok(()); #[cfg(all(target_os = "linux", not(target_env = "musl")))] @@ -352,3 +341,121 @@ false } } + +#[cfg(unix)] +mod sys { + use std::fs::File; + use std::io::{Error, Result}; + use std::os::unix::io::AsRawFd; + + pub(super) fn lock_shared(file: &File) -> Result<()> { + flock(file, libc::LOCK_SH) + } + + pub(super) fn lock_exclusive(file: &File) -> Result<()> { + flock(file, libc::LOCK_EX) + } + + pub(super) fn try_lock_shared(file: &File) -> Result<()> { + flock(file, libc::LOCK_SH | libc::LOCK_NB) + } + + pub(super) fn try_lock_exclusive(file: &File) -> Result<()> { + flock(file, libc::LOCK_EX | libc::LOCK_NB) + } + + pub(super) fn unlock(file: &File) -> Result<()> { + flock(file, libc::LOCK_UN) + } + + pub(super) fn error_contended(err: &Error) -> bool { + err.raw_os_error().map_or(false, |x| x == libc::EWOULDBLOCK) + } + + pub(super) fn error_unsupported(err: &Error) -> bool { + match err.raw_os_error() { + Some(libc::ENOTSUP) => true, + #[cfg(target_os = "linux")] + Some(libc::ENOSYS) => true, + _ => false, + } + } + + #[cfg(not(target_os = "solaris"))] + fn flock(file: &File, flag: libc::c_int) -> Result<()> { + let ret = unsafe { libc::flock(file.as_raw_fd(), flag) }; + if ret < 0 { + Err(Error::last_os_error()) + } else { + Ok(()) + } + } + + #[cfg(target_os = "solaris")] + fn flock(file: &File, flag: libc::c_int) -> Result<()> { + // Solaris lacks flock(), so simply succeed with a no-op + Ok(()) + } +} + +#[cfg(windows)] +mod sys { + use std::fs::File; + use std::io::{Error, Result}; + use std::mem; + use std::os::windows::io::AsRawHandle; + + use winapi::shared::minwindef::DWORD; + use winapi::shared::winerror::{ERROR_INVALID_FUNCTION, ERROR_LOCK_VIOLATION}; + use winapi::um::fileapi::{LockFileEx, UnlockFile}; + use winapi::um::minwinbase::{LOCKFILE_EXCLUSIVE_LOCK, LOCKFILE_FAIL_IMMEDIATELY}; + + pub(super) fn lock_shared(file: &File) -> Result<()> { + lock_file(file, 0) + } + + pub(super) fn lock_exclusive(file: &File) -> Result<()> { + lock_file(file, LOCKFILE_EXCLUSIVE_LOCK) + } + + pub(super) fn try_lock_shared(file: &File) -> Result<()> { + lock_file(file, LOCKFILE_FAIL_IMMEDIATELY) + } + + pub(super) fn try_lock_exclusive(file: &File) -> Result<()> { + lock_file(file, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY) + } + + pub(super) fn error_contended(err: &Error) -> bool { + err.raw_os_error() + .map_or(false, |x| x == ERROR_LOCK_VIOLATION as i32) + } + + pub(super) fn error_unsupported(err: &Error) -> bool { + err.raw_os_error() + .map_or(false, |x| x == ERROR_INVALID_FUNCTION as i32) + } + + pub(super) fn unlock(file: &File) -> Result<()> { + unsafe { + let ret = UnlockFile(file.as_raw_handle(), 0, 0, !0, !0); + if ret == 0 { + Err(Error::last_os_error()) + } else { + Ok(()) + } + } + } + + fn lock_file(file: &File, flags: DWORD) -> Result<()> { + unsafe { + let mut overlapped = mem::zeroed(); + let ret = LockFileEx(file.as_raw_handle(), flags, 0, !0, !0, &mut overlapped); 
+ if ret == 0 { + Err(Error::last_os_error()) + } else { + Ok(()) + } + } + } +} diff -Nru cargo-0.44.1/src/cargo/util/graph.rs cargo-0.47.0/src/cargo/util/graph.rs --- cargo-0.44.1/src/cargo/util/graph.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/graph.rs 2020-07-17 20:39:39.000000000 +0000 @@ -37,7 +37,7 @@ self.nodes.get(from)?.get(to) } - pub fn edges(&self, from: &N) -> impl Iterator { + pub fn edges(&self, from: &N) -> impl Iterator { self.nodes.get(from).into_iter().flat_map(|x| x.iter()) } @@ -95,8 +95,8 @@ p.iter() // Note that we can have "cycles" introduced through dev-dependency // edges, so make sure we don't loop infinitely. - .find(|&(node, _)| !result.contains(&node)) - .map(|(ref p, _)| p) + .find(|(node, _)| !result.contains(node)) + .map(|(p, _)| p) }) { result.push(p); pkg = p; @@ -114,11 +114,11 @@ let first_pkg_depending_on = |pkg: &N, res: &[&N]| { self.nodes .iter() - .filter(|&(_, adjacent)| adjacent.contains_key(pkg)) + .filter(|(_, adjacent)| adjacent.contains_key(pkg)) // Note that we can have "cycles" introduced through dev-dependency // edges, so make sure we don't loop infinitely. - .find(|&(node, _)| !res.contains(&node)) - .map(|(ref p, _)| p) + .find(|(node, _)| !res.contains(node)) + .map(|(p, _)| p) }; while let Some(p) = first_pkg_depending_on(pkg, &result) { result.push(p); diff -Nru cargo-0.44.1/src/cargo/util/hasher.rs cargo-0.47.0/src/cargo/util/hasher.rs --- cargo-0.44.1/src/cargo/util/hasher.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/hasher.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,24 @@ +//! Implementation of a hasher that produces the same values across releases. +//! +//! The hasher should be fast and have a low chance of collisions (but is not +//! sufficient for cryptographic purposes). +#![allow(deprecated)] + +use std::hash::{Hasher, SipHasher}; + +pub struct StableHasher(SipHasher); + +impl StableHasher { + pub fn new() -> StableHasher { + StableHasher(SipHasher::new()) + } +} + +impl Hasher for StableHasher { + fn finish(&self) -> u64 { + self.0.finish() + } + fn write(&mut self, bytes: &[u8]) { + self.0.write(bytes) + } +} diff -Nru cargo-0.44.1/src/cargo/util/hex.rs cargo-0.47.0/src/cargo/util/hex.rs --- cargo-0.44.1/src/cargo/util/hex.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/hex.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,6 @@ -#![allow(deprecated)] - +use super::StableHasher; use std::fs::File; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; use std::io::Read; pub fn to_hex(num: u64) -> String { @@ -18,13 +17,13 @@ } pub fn hash_u64(hashable: H) -> u64 { - let mut hasher = SipHasher::new(); + let mut hasher = StableHasher::new(); hashable.hash(&mut hasher); hasher.finish() } pub fn hash_u64_file(mut file: &File) -> std::io::Result { - let mut hasher = SipHasher::new_with_keys(0, 0); + let mut hasher = StableHasher::new(); let mut buf = [0; 64 * 1024]; loop { let n = file.read(&mut buf)?; diff -Nru cargo-0.44.1/src/cargo/util/important_paths.rs cargo-0.47.0/src/cargo/util/important_paths.rs --- cargo-0.44.1/src/cargo/util/important_paths.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/important_paths.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,5 @@ use crate::util::errors::CargoResult; use crate::util::paths; -use std::fs; use std::path::{Path, PathBuf}; /// Finds the root `Cargo.toml`. 
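The `StableHasher` added above (src/cargo/util/hasher.rs) wraps `SipHasher` purely so that fingerprint hashes come out the same across Cargo releases, which hashers with unspecified or randomized algorithms do not guarantee. A minimal self-contained sketch of how the new `hash_u64` helper in util/hex.rs is meant to behave; the stand-in type and the sample input are illustrative only:

#![allow(deprecated)] // SipHasher is deprecated, but its output is stable, which is the point.
use std::hash::{Hash, Hasher, SipHasher};

// Stand-in for the `StableHasher` introduced in this patch.
struct StableHasher(SipHasher);

impl Hasher for StableHasher {
    fn finish(&self) -> u64 {
        self.0.finish()
    }
    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }
}

// Mirrors `util::hex::hash_u64` after this patch.
fn hash_u64<H: Hash>(hashable: H) -> u64 {
    let mut hasher = StableHasher(SipHasher::new());
    hashable.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // The same input yields the same fingerprint on every run and every release.
    assert_eq!(
        hash_u64("example-fingerprint-input"),
        hash_u64("example-fingerprint-input")
    );
}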
@@ -8,7 +7,7 @@ let file = "Cargo.toml"; for current in paths::ancestors(cwd) { let manifest = current.join(file); - if fs::metadata(&manifest).is_ok() { + if manifest.exists() { return Ok(manifest); } } diff -Nru cargo-0.44.1/src/cargo/util/interning.rs cargo-0.47.0/src/cargo/util/interning.rs --- cargo-0.44.1/src/cargo/util/interning.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/interning.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,182 @@ +use serde::{Serialize, Serializer}; +use std::borrow::Borrow; +use std::cmp::Ordering; +use std::collections::HashSet; +use std::ffi::OsStr; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::ops::Deref; +use std::path::Path; +use std::ptr; +use std::str; +use std::sync::Mutex; + +fn leak(s: String) -> &'static str { + Box::leak(s.into_boxed_str()) +} + +lazy_static::lazy_static! { + static ref STRING_CACHE: Mutex<HashSet<&'static str>> = Mutex::new(HashSet::new()); +} + +#[derive(Clone, Copy)] +pub struct InternedString { + inner: &'static str, +} + +impl<'a> From<&'a str> for InternedString { + fn from(item: &'a str) -> Self { + InternedString::new(item) + } +} + +impl<'a> From<&'a String> for InternedString { + fn from(item: &'a String) -> Self { + InternedString::new(item) + } +} + +impl From<String> for InternedString { + fn from(item: String) -> Self { + InternedString::new(&item) + } +} + +impl PartialEq for InternedString { + fn eq(&self, other: &InternedString) -> bool { + ptr::eq(self.as_str(), other.as_str()) + } +} + +impl PartialEq<str> for InternedString { + fn eq(&self, other: &str) -> bool { + *self == other + } +} + +impl<'a> PartialEq<&'a str> for InternedString { + fn eq(&self, other: &&str) -> bool { + **self == **other + } +} + +impl Eq for InternedString {} + +impl InternedString { + pub fn new(str: &str) -> InternedString { + let mut cache = STRING_CACHE.lock().unwrap(); + let s = cache.get(str).cloned().unwrap_or_else(|| { + let s = leak(str.to_string()); + cache.insert(s); + s + }); + + InternedString { inner: s } + } + + pub fn as_str(&self) -> &'static str { + self.inner + } +} + +impl Deref for InternedString { + type Target = str; + + fn deref(&self) -> &'static str { + self.as_str() + } +} + +impl AsRef<str> for InternedString { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl AsRef<OsStr> for InternedString { + fn as_ref(&self) -> &OsStr { + self.as_str().as_ref() + } +} + +impl AsRef<Path> for InternedString { + fn as_ref(&self) -> &Path { + self.as_str().as_ref() + } +} + +impl Hash for InternedString { + // N.B., we can't implement this as `identity(self).hash(state)`, + // because we use this for on-disk fingerprints and so need + // stability across Cargo invocations. + fn hash<H: Hasher>(&self, state: &mut H) { + self.as_str().hash(state); + } +} + +impl Borrow<str> for InternedString { + // If we implement Hash as `identity(self).hash(state)`, + // then this will need to be removed. 
+ fn borrow(&self) -> &str { + self.as_str() + } +} + +impl fmt::Debug for InternedString { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self.as_str(), f) + } +} + +impl fmt::Display for InternedString { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_str(), f) + } +} + +impl Ord for InternedString { + fn cmp(&self, other: &InternedString) -> Ordering { + self.as_str().cmp(other.as_str()) + } +} + +impl PartialOrd for InternedString { + fn partial_cmp(&self, other: &InternedString) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Serialize for InternedString { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(self.inner) + } +} + +struct InternedStringVisitor; + +impl<'de> serde::Deserialize<'de> for InternedString { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_str(InternedStringVisitor) + } +} + +impl<'de> serde::de::Visitor<'de> for InternedStringVisitor { + type Value = InternedString; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a string-like thing") + } + + fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + Ok(InternedString::new(v)) + } +} diff -Nru cargo-0.44.1/src/cargo/util/job.rs cargo-0.47.0/src/cargo/util/job.rs --- cargo-0.44.1/src/cargo/util/job.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/job.rs 2020-07-17 20:39:39.000000000 +0000 @@ -2,7 +2,7 @@ //! //! Most of the time when you're running cargo you expect Ctrl-C to actually //! terminate the entire tree of processes in play, not just the one at the top -//! (cago). This currently works "by default" on Unix platforms because Ctrl-C +//! (cargo). This currently works "by default" on Unix platforms because Ctrl-C //! actually sends a signal to the *process group* rather than the parent //! process, so everything will get torn down. On Windows, however, this does //! not happen and Ctrl-C just kills cargo. 
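To make the intent of the new `interning` module concrete: `InternedString::new` leaks each distinct string once and hands out copies of the same `&'static str`, so equality is a pointer comparison while `Hash`, `Ord`, and the serde impls go through the string contents (keeping on-disk fingerprints stable). A small usage sketch, assuming the `cargo` crate built from this patch is available as a library dependency:

use cargo::util::interning::InternedString;

fn main() {
    let a = InternedString::new("serde");
    let b = InternedString::new("serde");
    // Equal contents intern to the same leaked &'static str, so `eq` is ptr::eq.
    assert_eq!(a, b);
    // Deref/AsRef let it stand in for &str, &OsStr, or &Path.
    assert!(a.starts_with("ser"));
    assert_eq!(a.as_str(), "serde");
}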
diff -Nru cargo-0.44.1/src/cargo/util/machine_message.rs cargo-0.47.0/src/cargo/util/machine_message.rs --- cargo-0.44.1/src/cargo/util/machine_message.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/machine_message.rs 2020-07-17 20:39:39.000000000 +0000 @@ -90,3 +90,14 @@ "timing-info" } } + +#[derive(Serialize)] +pub struct BuildFinished { + pub success: bool, +} + +impl Message for BuildFinished { + fn reason(&self) -> &str { + "build-finished" + } +} diff -Nru cargo-0.44.1/src/cargo/util/mod.rs cargo-0.47.0/src/cargo/util/mod.rs --- cargo-0.44.1/src/cargo/util/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -9,6 +9,7 @@ pub use self::errors::{CargoTestError, CliError, ProcessError}; pub use self::flock::{FileLock, Filesystem}; pub use self::graph::Graph; +pub use self::hasher::StableHasher; pub use self::hex::{hash_u64, short_hash, to_hex}; pub use self::into_url::IntoUrl; pub use self::into_url_with_base::IntoUrlWithBase; @@ -18,7 +19,9 @@ pub use self::paths::{dylib_path_envvar, normalize_path}; pub use self::process_builder::{process, ProcessBuilder}; pub use self::progress::{Progress, ProgressStyle}; +pub use self::queue::Queue; pub use self::read2::read2; +pub use self::restricted_names::validate_package_name; pub use self::rustc::Rustc; pub use self::sha256::Sha256; pub use self::to_semver::ToSemver; @@ -37,8 +40,10 @@ pub mod errors; mod flock; pub mod graph; +mod hasher; pub mod hex; pub mod important_paths; +pub mod interning; pub mod into_url; mod into_url_with_base; pub mod job; @@ -50,7 +55,9 @@ pub mod process_builder; pub mod profile; mod progress; +mod queue; mod read2; +pub mod restricted_names; pub mod rustc; mod sha256; pub mod to_semver; @@ -68,22 +75,6 @@ } } -/// Check the base requirements for a package name. -/// -/// This can be used for other things than package names, to enforce some -/// level of sanity. Note that package names have other restrictions -/// elsewhere. `cargo new` has a few restrictions, such as checking for -/// reserved names. crates.io has even more restrictions. -pub fn validate_package_name(name: &str, what: &str, help: &str) -> CargoResult<()> { - if let Some(ch) = name - .chars() - .find(|ch| !ch.is_alphanumeric() && *ch != '_' && *ch != '-') - { - anyhow::bail!("Invalid character `{}` in {}: `{}`{}", ch, what, name, help); - } - Ok(()) -} - /// Whether or not this running in a Continuous Integration environment. 
pub fn is_ci() -> bool { std::env::var("CI").is_ok() || std::env::var("TF_BUILD").is_ok() diff -Nru cargo-0.44.1/src/cargo/util/network.rs cargo-0.47.0/src/cargo/util/network.rs --- cargo-0.44.1/src/cargo/util/network.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/network.rs 2020-07-17 20:39:39.000000000 +0000 @@ -20,9 +20,9 @@ match f() { Err(ref e) if maybe_spurious(e) && self.remaining > 0 => { let msg = format!( - "spurious network error ({} tries \ - remaining): {}", - self.remaining, e + "spurious network error ({} tries remaining): {}", + self.remaining, + e.root_cause(), ); self.config.shell().warn(msg)?; self.remaining -= 1; @@ -34,30 +34,29 @@ } fn maybe_spurious(err: &Error) -> bool { - for e in err.chain() { - if let Some(git_err) = e.downcast_ref::() { - match git_err.class() { - git2::ErrorClass::Net | git2::ErrorClass::Os => return true, - _ => (), - } + if let Some(git_err) = err.downcast_ref::() { + match git_err.class() { + git2::ErrorClass::Net | git2::ErrorClass::Os => return true, + _ => (), } - if let Some(curl_err) = e.downcast_ref::() { - if curl_err.is_couldnt_connect() - || curl_err.is_couldnt_resolve_proxy() - || curl_err.is_couldnt_resolve_host() - || curl_err.is_operation_timedout() - || curl_err.is_recv_error() - || curl_err.is_http2_stream_error() - || curl_err.is_ssl_connect_error() - || curl_err.is_partial_file() - { - return true; - } + } + if let Some(curl_err) = err.downcast_ref::() { + if curl_err.is_couldnt_connect() + || curl_err.is_couldnt_resolve_proxy() + || curl_err.is_couldnt_resolve_host() + || curl_err.is_operation_timedout() + || curl_err.is_recv_error() + || curl_err.is_http2_error() + || curl_err.is_http2_stream_error() + || curl_err.is_ssl_connect_error() + || curl_err.is_partial_file() + { + return true; } - if let Some(not_200) = e.downcast_ref::() { - if 500 <= not_200.code && not_200.code < 600 { - return true; - } + } + if let Some(not_200) = err.downcast_ref::() { + if 500 <= not_200.code && not_200.code < 600 { + return true; } } false diff -Nru cargo-0.44.1/src/cargo/util/paths.rs cargo-0.47.0/src/cargo/util/paths.rs --- cargo-0.44.1/src/cargo/util/paths.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/paths.rs 2020-07-17 20:39:39.000000000 +0000 @@ -7,6 +7,7 @@ use std::path::{Component, Path, PathBuf}; use filetime::FileTime; +use tempfile::Builder as TempFileBuilder; use crate::util::errors::{CargoResult, CargoResultExt}; @@ -118,27 +119,12 @@ } pub fn read_bytes(path: &Path) -> CargoResult> { - let res = (|| -> CargoResult<_> { - let mut ret = Vec::new(); - let mut f = File::open(path)?; - if let Ok(m) = f.metadata() { - ret.reserve(m.len() as usize + 1); - } - f.read_to_end(&mut ret)?; - Ok(ret) - })() - .chain_err(|| format!("failed to read `{}`", path.display()))?; - Ok(res) + fs::read(path).chain_err(|| format!("failed to read `{}`", path.display())) } -pub fn write(path: &Path, contents: &[u8]) -> CargoResult<()> { - (|| -> CargoResult<()> { - let mut f = File::create(path)?; - f.write_all(contents)?; - Ok(()) - })() - .chain_err(|| format!("failed to write `{}`", path.display()))?; - Ok(()) +pub fn write, C: AsRef<[u8]>>(path: P, contents: C) -> CargoResult<()> { + let path = path.as_ref(); + fs::write(path, contents.as_ref()).chain_err(|| format!("failed to write `{}`", path.display())) } pub fn write_if_changed, C: AsRef<[u8]>>(path: P, contents: C) -> CargoResult<()> { @@ -177,6 +163,18 @@ Ok(()) } +/// Creates a new file. 
+pub fn create>(path: P) -> CargoResult { + let path = path.as_ref(); + File::create(path).chain_err(|| format!("failed to create file `{}`", path.display())) +} + +/// Opens an existing file. +pub fn open>(path: P) -> CargoResult { + let path = path.as_ref(); + File::open(path).chain_err(|| format!("failed to open file `{}`", path.display())) +} + pub fn mtime(path: &Path) -> CargoResult { let meta = fs::metadata(path).chain_err(|| format!("failed to stat `{}`", path.display()))?; Ok(FileTime::from_last_modification_time(&meta)) @@ -190,7 +188,7 @@ let timestamp = path.join("invoked.timestamp"); write( ×tamp, - b"This file has an mtime of when this was started.", + "This file has an mtime of when this was started.", )?; let ft = mtime(×tamp)?; log::debug!("invocation time for {:?} is {}", path, ft); @@ -280,7 +278,11 @@ } fn _remove_dir_all(p: &Path) -> CargoResult<()> { - if p.symlink_metadata()?.file_type().is_symlink() { + if p.symlink_metadata() + .chain_err(|| format!("could not get metadata for `{}` to remove", p.display()))? + .file_type() + .is_symlink() + { return remove_file(p); } let entries = p @@ -408,3 +410,139 @@ })?; Ok(()) } + +/// Copies a file from one location to another. +pub fn copy, Q: AsRef>(from: P, to: Q) -> CargoResult { + let from = from.as_ref(); + let to = to.as_ref(); + fs::copy(from, to) + .chain_err(|| format!("failed to copy `{}` to `{}`", from.display(), to.display())) +} + +/// Changes the filesystem mtime (and atime if possible) for the given file. +/// +/// This intentionally does not return an error, as this is sometimes not +/// supported on network filesystems. For the current uses in Cargo, this is a +/// "best effort" approach, and errors shouldn't be propagated. +pub fn set_file_time_no_err>(path: P, time: FileTime) { + let path = path.as_ref(); + match filetime::set_file_times(path, time, time) { + Ok(()) => log::debug!("set file mtime {} to {}", path.display(), time), + Err(e) => log::warn!( + "could not set mtime of {} to {}: {:?}", + path.display(), + time, + e + ), + } +} + +/// Strips `base` from `path`. +/// +/// This canonicalizes both paths before stripping. This is useful if the +/// paths are obtained in different ways, and one or the other may or may not +/// have been normalized in some way. +pub fn strip_prefix_canonical>( + path: P, + base: P, +) -> Result { + // Not all filesystems support canonicalize. Just ignore if it doesn't work. + let safe_canonicalize = |path: &Path| match path.canonicalize() { + Ok(p) => p, + Err(e) => { + log::warn!("cannot canonicalize {:?}: {:?}", path, e); + path.to_path_buf() + } + }; + let canon_path = safe_canonicalize(path.as_ref()); + let canon_base = safe_canonicalize(base.as_ref()); + canon_path.strip_prefix(canon_base).map(|p| p.to_path_buf()) +} + +/// Creates an excluded from cache directory atomically with its parents as needed. +/// +/// The atomicity only covers creating the leaf directory and exclusion from cache. Any missing +/// parent directories will not be created in an atomic manner. +/// +/// This function is idempotent and in addition to that it won't exclude ``p`` from cache if it +/// already exists. 
+pub fn create_dir_all_excluded_from_backups_atomic(p: impl AsRef<Path>) -> CargoResult<()> { + let path = p.as_ref(); + if path.is_dir() { + return Ok(()); + } + + let parent = path.parent().unwrap(); + let base = path.file_name().unwrap(); + create_dir_all(parent)?; + // We do this in two steps: first create a temporary directory and exclude + // it from backups, then rename it to the desired name. If we created the + // directory directly where it should be and then excluded it from backups, + // we would risk a situation where cargo is interrupted right after the + // directory creation but before the exclusion; the directory would then + // remain non-excluded from backups, because we only perform the exclusion + // right after we created the directory ourselves. + // + // We need the tempdir created in parent instead of $TMP, because only then we can be + // easily sure that rename() will succeed (the new name needs to be on the same mount + // point as the old one). + let tempdir = TempFileBuilder::new().prefix(base).tempdir_in(parent)?; + exclude_from_backups(&tempdir.path()); + // Previously std::fs::create_dir_all() (through paths::create_dir_all()) was used + // here to create the directory directly, and fs::create_dir_all() explicitly treats + // the directory being created concurrently by another thread or process as success, + // hence the check below to follow the existing behavior. If we get an error at + // rename() and suddenly the directory (which didn't exist a moment earlier) exists, + // we can infer from it that another cargo process is doing the work. + if let Err(e) = fs::rename(tempdir.path(), path) { + if !path.exists() { + return Err(anyhow::Error::from(e)); + } + } + Ok(()) +} + +/// Marks the directory as excluded from archives/backups. +/// +/// This is recommended to prevent derived/temporary files from bloating backups. There are two +/// mechanisms used to achieve this right now: +/// +/// * A dedicated resource property excluding from Time Machine backups on macOS +/// * CACHEDIR.TAG files supported by various tools in a platform-independent way +fn exclude_from_backups(path: &Path) { + exclude_from_time_machine(path); + let _ = std::fs::write( + path.join("CACHEDIR.TAG"), + "Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by cargo. +# For information about cache directory tags see https://bford.info/cachedir/", + ); + // Similarly to exclude_from_time_machine() we ignore errors here, as it's an optional feature. 
+} + +#[cfg(not(target_os = "macos"))] +fn exclude_from_time_machine(_: &Path) {} + +#[cfg(target_os = "macos")] +/// Marks files or directories as excluded from Time Machine on macOS +fn exclude_from_time_machine(path: &Path) { + use core_foundation::base::TCFType; + use core_foundation::{number, string, url}; + use std::ptr; + + // For compatibility with 10.7 a string is used instead of global kCFURLIsExcludedFromBackupKey + let is_excluded_key: Result = "NSURLIsExcludedFromBackupKey".parse(); + let path = url::CFURL::from_path(path, false); + if let (Some(path), Ok(is_excluded_key)) = (path, is_excluded_key) { + unsafe { + url::CFURLSetResourcePropertyForKey( + path.as_concrete_TypeRef(), + is_excluded_key.as_concrete_TypeRef(), + number::kCFBooleanTrue as *const _, + ptr::null_mut(), + ); + } + } + // Errors are ignored, since it's an optional feature and failure + // doesn't prevent Cargo from working +} diff -Nru cargo-0.44.1/src/cargo/util/process_builder.rs cargo-0.47.0/src/cargo/util/process_builder.rs --- cargo-0.44.1/src/cargo/util/process_builder.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/process_builder.rs 2020-07-17 20:39:39.000000000 +0000 @@ -6,6 +6,7 @@ use std::env; use std::ffi::{OsStr, OsString}; use std::fmt; +use std::iter::once; use std::path::Path; use std::process::{Command, Output, Stdio}; @@ -326,6 +327,37 @@ } command } + + /// Wraps an existing command with the provided wrapper, if it is present and valid. + /// + /// # Examples + /// + /// ```rust + /// use cargo::util::{ProcessBuilder, process}; + /// // Running this would execute `rustc` + /// let cmd: ProcessBuilder = process("rustc"); + /// + /// // Running this will execute `sccache rustc` + /// let cmd = cmd.wrapped(Some("sccache")); + /// ``` + pub fn wrapped(mut self, wrapper: Option>) -> Self { + let wrapper = if let Some(wrapper) = wrapper.as_ref() { + wrapper.as_ref() + } else { + return self; + }; + + if wrapper.is_empty() { + return self; + } + + let args = once(self.program).chain(self.args.into_iter()).collect(); + + self.program = wrapper.to_os_string(); + self.args = args; + + self + } } /// A helper function to create a `ProcessBuilder`. 
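The `ProcessBuilder::wrapped` helper added above prepends the wrapper program and shifts the previous program into the argument list; `Rustc::workspace_process` later in this patch chains two such calls. A hedged sketch of the effect, assuming the `cargo` crate from this patch as a library; the wrapper names are examples only, and `get_program` is used here just to observe the result:

use cargo::util::{process, ProcessBuilder};

fn main() {
    // `None` and empty wrappers leave the command untouched.
    let plain: ProcessBuilder = process("rustc").wrapped(None::<&str>);
    assert_eq!(plain.get_program(), "rustc");

    // Chaining mirrors `Rustc::workspace_process`: the outermost call ends up first
    // on the command line, so this would run `sccache clippy-driver rustc ...`.
    let cmd = process("rustc")
        .wrapped(Some("clippy-driver"))
        .wrapped(Some("sccache"));
    assert_eq!(cmd.get_program(), "sccache");
}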
@@ -349,13 +381,11 @@ pub fn exec_replace(process_builder: &ProcessBuilder) -> CargoResult<()> { let mut command = process_builder.build_command(); let error = command.exec(); - Err(anyhow::Error::from(error) - .context(process_error( - &format!("could not execute process {}", process_builder), - None, - None, - )) - .into()) + Err(anyhow::Error::from(error).context(process_error( + &format!("could not execute process {}", process_builder), + None, + None, + ))) } } diff -Nru cargo-0.44.1/src/cargo/util/profile.rs cargo-0.47.0/src/cargo/util/profile.rs --- cargo-0.44.1/src/cargo/util/profile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/profile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -48,11 +48,7 @@ let duration = start.elapsed(); let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_millis()); - let msg = ( - stack_len, - duration_ms, - mem::replace(&mut self.desc, String::new()), - ); + let msg = (stack_len, duration_ms, mem::take(&mut self.desc)); MESSAGES.with(|msgs| msgs.borrow_mut().push(msg)); if stack_len == 0 { diff -Nru cargo-0.44.1/src/cargo/util/queue.rs cargo-0.47.0/src/cargo/util/queue.rs --- cargo-0.44.1/src/cargo/util/queue.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/queue.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,75 @@ +use std::collections::VecDeque; +use std::sync::{Condvar, Mutex}; +use std::time::Duration; + +/// A simple, threadsafe, queue of items of type `T` +/// +/// This is a sort of channel where any thread can push to a queue and any +/// thread can pop from a queue. +/// +/// This supports both bounded and unbounded operations. `push` will never block, +/// and allows the queue to grow without bounds. `push_bounded` will block if the +/// queue is over capacity, and will resume once there is enough capacity. +pub struct Queue { + state: Mutex>, + popper_cv: Condvar, + bounded_cv: Condvar, + bound: usize, +} + +struct State { + items: VecDeque, +} + +impl Queue { + pub fn new(bound: usize) -> Queue { + Queue { + state: Mutex::new(State { + items: VecDeque::new(), + }), + popper_cv: Condvar::new(), + bounded_cv: Condvar::new(), + bound, + } + } + + pub fn push(&self, item: T) { + self.state.lock().unwrap().items.push_back(item); + self.popper_cv.notify_one(); + } + + /// Pushes an item onto the queue, blocking if the queue is full. + pub fn push_bounded(&self, item: T) { + let locked_state = self.state.lock().unwrap(); + let mut state = self + .bounded_cv + .wait_while(locked_state, |s| s.items.len() >= self.bound) + .unwrap(); + state.items.push_back(item); + self.popper_cv.notify_one(); + } + + pub fn pop(&self, timeout: Duration) -> Option { + let (mut state, result) = self + .popper_cv + .wait_timeout_while(self.state.lock().unwrap(), timeout, |s| s.items.is_empty()) + .unwrap(); + if result.timed_out() { + None + } else { + let value = state.items.pop_front()?; + if state.items.len() < self.bound { + // Assumes threads cannot be canceled. + self.bounded_cv.notify_one(); + } + Some(value) + } + } + + pub fn try_pop_all(&self) -> Vec { + let mut state = self.state.lock().unwrap(); + let result = state.items.drain(..).collect(); + self.bounded_cv.notify_all(); + result + } +} diff -Nru cargo-0.44.1/src/cargo/util/restricted_names.rs cargo-0.47.0/src/cargo/util/restricted_names.rs --- cargo-0.44.1/src/cargo/util/restricted_names.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/restricted_names.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,94 @@ +//! 
Helpers for validating and checking names like package and crate names. + +use crate::util::CargoResult; +use anyhow::bail; +use std::path::Path; + +/// Returns `true` if the name contains non-ASCII characters. +pub fn is_non_ascii_name(name: &str) -> bool { + name.chars().any(|ch| ch > '\x7f') +} + +/// A Rust keyword. +pub fn is_keyword(name: &str) -> bool { + // See https://doc.rust-lang.org/reference/keywords.html + [ + "Self", "abstract", "as", "async", "await", "become", "box", "break", "const", "continue", + "crate", "do", "dyn", "else", "enum", "extern", "false", "final", "fn", "for", "if", + "impl", "in", "let", "loop", "macro", "match", "mod", "move", "mut", "override", "priv", + "pub", "ref", "return", "self", "static", "struct", "super", "trait", "true", "try", + "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield", + ] + .contains(&name) +} + +/// These names cannot be used on Windows, even with an extension. +pub fn is_windows_reserved(name: &str) -> bool { + [ + "con", "prn", "aux", "nul", "com1", "com2", "com3", "com4", "com5", "com6", "com7", "com8", + "com9", "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9", + ] + .contains(&name.to_ascii_lowercase().as_str()) +} + +/// An artifact with this name will conflict with one of Cargo's build directories. +pub fn is_conflicting_artifact_name(name: &str) -> bool { + ["deps", "examples", "build", "incremental"].contains(&name) +} + +/// Check the base requirements for a package name. +/// +/// This can be used for other things than package names, to enforce some +/// level of sanity. Note that package names have other restrictions +/// elsewhere. `cargo new` has a few restrictions, such as checking for +/// reserved names. crates.io has even more restrictions. +pub fn validate_package_name(name: &str, what: &str, help: &str) -> CargoResult<()> { + let mut chars = name.chars(); + if let Some(ch) = chars.next() { + if ch.is_digit(10) { + // A specific error for a potentially common case. + bail!( + "the name `{}` cannot be used as a {}, \ + the name cannot start with a digit{}", + name, + what, + help + ); + } + if !(unicode_xid::UnicodeXID::is_xid_start(ch) || ch == '_') { + bail!( + "invalid character `{}` in {}: `{}`, \ + the first character must be a Unicode XID start character \ + (most letters or `_`){}", + ch, + what, + name, + help + ); + } + } + for ch in chars { + if !(unicode_xid::UnicodeXID::is_xid_continue(ch) || ch == '-') { + bail!( + "invalid character `{}` in {}: `{}`, \ + characters must be Unicode XID characters \ + (numbers, `-`, `_`, or most letters){}", + ch, + what, + name, + help + ); + } + } + Ok(()) +} + +// Check the entire path for names reserved in Windows. 
+pub fn is_windows_reserved_path(path: &Path) -> bool { + path.iter() + .filter_map(|component| component.to_str()) + .any(|component| { + let stem = component.split('.').next().unwrap(); + is_windows_reserved(stem) + }) +} diff -Nru cargo-0.44.1/src/cargo/util/rustc.rs cargo-0.47.0/src/cargo/util/rustc.rs --- cargo-0.44.1/src/cargo/util/rustc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/rustc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,17 +1,15 @@ -#![allow(deprecated)] // for SipHasher - use std::collections::hash_map::{Entry, HashMap}; use std::env; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; use std::path::{Path, PathBuf}; use std::sync::Mutex; use log::{debug, info, warn}; use serde::{Deserialize, Serialize}; -use crate::core::InternedString; +use crate::util::interning::InternedString; use crate::util::paths; -use crate::util::{self, profile, CargoResult, CargoResultExt, ProcessBuilder}; +use crate::util::{self, profile, CargoResult, CargoResultExt, ProcessBuilder, StableHasher}; /// Information on the `rustc` executable #[derive(Debug)] @@ -20,9 +18,13 @@ pub path: PathBuf, /// An optional program that will be passed the path of the rust exe as its first argument, and /// rustc args following this. - pub wrapper: Option, + pub wrapper: Option, + /// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates + pub workspace_wrapper: Option, /// Verbose version information (the output of `rustc -vV`) pub verbose_version: String, + /// The rustc version (`1.23.4-beta.2`), this comes from verbose_version. + pub version: semver::Version, /// The host triple (arch-platform-OS), this comes from verbose_version. pub host: InternedString, cache: Mutex, @@ -37,6 +39,7 @@ pub fn new( path: PathBuf, wrapper: Option, + workspace_wrapper: Option, rustup_rustc: &Path, cache_location: Option, ) -> CargoResult { @@ -48,44 +51,49 @@ cmd.arg("-vV"); let verbose_version = cache.cached_output(&cmd)?.0; - let host = { - let triple = verbose_version + let extract = |field: &str| -> CargoResult<&str> { + verbose_version .lines() - .find(|l| l.starts_with("host: ")) - .map(|l| &l[6..]) + .find(|l| l.starts_with(field)) + .map(|l| &l[field.len()..]) .ok_or_else(|| { anyhow::format_err!( - "`rustc -vV` didn't have a line for `host:`, got:\n{}", + "`rustc -vV` didn't have a line for `{}`, got:\n{}", + field.trim(), verbose_version ) - })?; - InternedString::new(triple) + }) }; + let host = InternedString::new(extract("host: ")?); + let version = semver::Version::parse(extract("release: ")?).chain_err(|| { + format!( + "rustc version does not appear to be a valid semver version, from:\n{}", + verbose_version + ) + })?; + Ok(Rustc { path, - wrapper: wrapper.map(util::process), + wrapper, + workspace_wrapper, verbose_version, + version, host, cache: Mutex::new(cache), }) } /// Gets a process builder set up to use the found rustc version, with a wrapper if `Some`. - pub fn process_with(&self, path: impl AsRef) -> ProcessBuilder { - match self.wrapper { - Some(ref wrapper) if !wrapper.get_program().is_empty() => { - let mut cmd = wrapper.clone(); - cmd.arg(path.as_ref()); - cmd - } - _ => util::process(path.as_ref()), - } + pub fn process(&self) -> ProcessBuilder { + util::process(self.path.as_path()).wrapped(self.wrapper.as_ref()) } /// Gets a process builder set up to use the found rustc version, with a wrapper if `Some`. 
- pub fn process(&self) -> ProcessBuilder { - self.process_with(&self.path) + pub fn workspace_process(&self) -> ProcessBuilder { + util::process(self.path.as_path()) + .wrapped(self.workspace_wrapper.as_ref()) + .wrapped(self.wrapper.as_ref()) } pub fn process_no_wrapper(&self) -> ProcessBuilder { @@ -95,10 +103,6 @@ pub fn cached_output(&self, cmd: &ProcessBuilder) -> CargoResult<(String, String)> { self.cache.lock().unwrap().cached_output(cmd) } - - pub fn set_wrapper(&mut self, wrapper: ProcessBuilder) { - self.wrapper = Some(wrapper); - } } /// It is a well known fact that `rustc` is not the fastest compiler in the @@ -216,7 +220,7 @@ } fn rustc_fingerprint(path: &Path, rustup_rustc: &Path) -> CargoResult { - let mut hasher = SipHasher::new(); + let mut hasher = StableHasher::new(); let path = paths::resolve_executable(path)?; path.hash(&mut hasher); @@ -260,7 +264,7 @@ } fn process_fingerprint(cmd: &ProcessBuilder) -> u64 { - let mut hasher = SipHasher::new(); + let mut hasher = StableHasher::new(); cmd.get_args().hash(&mut hasher); let mut env = cmd.get_envs().iter().collect::>(); env.sort_unstable(); diff -Nru cargo-0.44.1/src/cargo/util/sha256.rs cargo-0.47.0/src/cargo/util/sha256.rs --- cargo-0.44.1/src/cargo/util/sha256.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/sha256.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,4 +1,4 @@ -use crate::util::{CargoResult, CargoResultExt}; +use crate::util::{paths, CargoResult, CargoResultExt}; use crypto_hash::{Algorithm, Hasher}; use std::fs::File; use std::io::{self, Read, Write}; @@ -30,7 +30,7 @@ pub fn update_path>(&mut self, path: P) -> CargoResult<&mut Sha256> { let path = path.as_ref(); - let file = File::open(path)?; + let file = paths::open(path)?; self.update_file(&file) .chain_err(|| format!("failed to read `{}`", path.display()))?; Ok(self) diff -Nru cargo-0.44.1/src/cargo/util/toml/mod.rs cargo-0.47.0/src/cargo/util/toml/mod.rs --- cargo-0.44.1/src/cargo/util/toml/mod.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/toml/mod.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,5 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fmt; -use std::fs; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::str; @@ -15,12 +14,15 @@ use url::Url; use crate::core::dependency::DepKind; -use crate::core::manifest::{LibKind, ManifestMetadata, TargetSourcePath, Warnings}; -use crate::core::{Dependency, InternedString, Manifest, PackageId, Summary, Target}; -use crate::core::{Edition, EitherManifest, Feature, Features, VirtualManifest}; +use crate::core::manifest::{ManifestMetadata, TargetSourcePath, Warnings}; +use crate::core::profiles::Strip; +use crate::core::resolver::ResolveBehavior; +use crate::core::{Dependency, Manifest, PackageId, Summary, Target}; +use crate::core::{Edition, EitherManifest, Feature, Features, VirtualManifest, Workspace}; use crate::core::{GitReference, PackageIdSpec, SourceId, WorkspaceConfig, WorkspaceRootConfig}; use crate::sources::{CRATES_IO_INDEX, CRATES_IO_REGISTRY}; use crate::util::errors::{CargoResult, CargoResultExt, ManifestError}; +use crate::util::interning::InternedString; use crate::util::{self, paths, validate_package_name, Config, IntoUrl}; mod targets; @@ -78,9 +80,9 @@ let (mut manifest, paths) = TomlManifest::to_real_manifest(&manifest, source_id, package_root, config)?; add_unused(manifest.warnings_mut()); - if !manifest.targets().iter().any(|t| !t.is_custom_build()) { + if manifest.targets().iter().all(|t| 
t.is_custom_build()) { bail!( - "no targets specified in the manifest\n \ + "no targets specified in the manifest\n\ either src/lib.rs, src/main.rs, a [lib] section, or \ [[bin]] section must be present" ) @@ -162,7 +164,7 @@ } let first_error = anyhow::Error::from(first_error); - Err(first_error.context("could not parse input as TOML").into()) + Err(first_error.context("could not parse input as TOML")) } type TomlLibTarget = TomlTarget; @@ -321,7 +323,7 @@ } else { Err(E::custom(format!( "must be an integer, `z`, or `s`, \ - but found: {}", + but found the string: \"{}\"", value ))) } @@ -407,6 +409,7 @@ pub build_override: Option>, pub dir_name: Option, pub inherits: Option, + pub strip: Option, } #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] @@ -522,6 +525,10 @@ ); } } + + if self.strip.is_some() { + features.require(Feature::strip())?; + } Ok(()) } @@ -641,6 +648,10 @@ if let Some(v) = &profile.dir_name { self.dir_name = Some(*v); } + + if let Some(v) = profile.strip { + self.strip = Some(v); + } } } @@ -798,7 +809,7 @@ description: Option, homepage: Option, documentation: Option, - readme: Option, + readme: Option, keywords: Option>, categories: Option>, license: Option, @@ -806,6 +817,7 @@ license_file: Option, repository: Option, metadata: Option, + resolver: Option, } #[derive(Debug, Deserialize, Serialize)] @@ -814,6 +826,8 @@ #[serde(rename = "default-members")] default_members: Option>, exclude: Option>, + metadata: Option, + resolver: Option, } impl TomlProject { @@ -837,9 +851,10 @@ impl TomlManifest { pub fn prepare_for_publish( &self, - config: &Config, + ws: &Workspace<'_>, package_root: &Path, ) -> CargoResult { + let config = ws.config(); let mut package = self .package .as_ref() @@ -847,6 +862,19 @@ .unwrap() .clone(); package.workspace = None; + let mut cargo_features = self.cargo_features.clone(); + package.resolver = ws.resolve_behavior().to_manifest(); + if package.resolver.is_some() { + // This should be removed when stabilizing. + match &mut cargo_features { + None => cargo_features = Some(vec!["resolver".to_string()]), + Some(feats) => { + if !feats.iter().any(|feat| feat == "resolver") { + feats.push("resolver".to_string()); + } + } + } + } if let Some(license_file) = &package.license_file { let license_path = Path::new(&license_file); let abs_license_path = paths::normalize_path(&package_root.join(license_path)); @@ -928,7 +956,7 @@ patch: None, workspace: None, badges: self.badges.clone(), - cargo_features: self.cargo_features.clone(), + cargo_features, }); fn map_deps( @@ -1016,6 +1044,25 @@ features.require(Feature::metabuild())?; } + if project.resolver.is_some() + || me + .workspace + .as_ref() + .map_or(false, |ws| ws.resolver.is_some()) + { + features.require(Feature::resolver())?; + } + let resolve_behavior = match ( + project.resolver.as_ref(), + me.workspace.as_ref().and_then(|ws| ws.resolver.as_ref()), + ) { + (None, None) => None, + (Some(s), None) | (None, Some(s)) => Some(ResolveBehavior::from_manifest(s)?), + (Some(_), Some(_)) => { + bail!("cannot specify `resolver` field in both `[workspace]` and `[package]`") + } + }; + // If we have no lib at all, use the inferred lib, if available. // If we have a lib with a path, we're done. // If we have a lib with no path, use the inferred lib or else the package name. 
@@ -1082,6 +1129,7 @@ }; for (n, v) in dependencies.iter() { let dep = v.to_dependency(n, cx, kind)?; + validate_package_name(dep.name_in_toml().as_str(), "dependency name", "")?; cx.deps.push(dep); } @@ -1146,25 +1194,28 @@ features.require(Feature::namespaced_features())?; } + let summary_features = me + .features + .as_ref() + .map(|x| { + x.iter() + .map(|(k, v)| (k.as_str(), v.iter().collect())) + .collect() + }) + .unwrap_or_else(BTreeMap::new); let summary = Summary::new( pkgid, deps, - &me.features - .as_ref() - .map(|x| { - x.iter() - .map(|(k, v)| (k.as_str(), v.iter().collect())) - .collect() - }) - .unwrap_or_else(BTreeMap::new), + &summary_features, project.links.as_deref(), project.namespaced_features.unwrap_or(false), )?; + let metadata = ManifestMetadata { description: project.description.clone(), homepage: project.homepage.clone(), documentation: project.documentation.clone(), - readme: project.readme.clone(), + readme: readme_for_project(package_root, project), authors: project.authors.clone().unwrap_or_default(), license: project.license.clone(), license_file: project.license_file.clone(), @@ -1181,6 +1232,7 @@ &config.members, &config.default_members, &config.exclude, + &config.metadata, )), (None, root) => WorkspaceConfig::Member { root: root.cloned(), @@ -1254,6 +1306,7 @@ project.default_run.clone(), Rc::clone(me), project.metabuild.clone().map(|sov| sov.0), + resolve_behavior, ); if project.license_file.is_some() && project.license.is_some() { manifest.warnings_mut().add_warning( @@ -1281,43 +1334,43 @@ config: &Config, ) -> CargoResult<(VirtualManifest, Vec)> { if me.project.is_some() { - bail!("virtual manifests do not define [project]"); + bail!("this virtual manifest specifies a [project] section, which is not allowed"); } if me.package.is_some() { - bail!("virtual manifests do not define [package]"); + bail!("this virtual manifest specifies a [package] section, which is not allowed"); } if me.lib.is_some() { - bail!("virtual manifests do not specify [lib]"); + bail!("this virtual manifest specifies a [lib] section, which is not allowed"); } if me.bin.is_some() { - bail!("virtual manifests do not specify [[bin]]"); + bail!("this virtual manifest specifies a [[bin]] section, which is not allowed"); } if me.example.is_some() { - bail!("virtual manifests do not specify [[example]]"); + bail!("this virtual manifest specifies a [[example]] section, which is not allowed"); } if me.test.is_some() { - bail!("virtual manifests do not specify [[test]]"); + bail!("this virtual manifest specifies a [[test]] section, which is not allowed"); } if me.bench.is_some() { - bail!("virtual manifests do not specify [[bench]]"); + bail!("this virtual manifest specifies a [[bench]] section, which is not allowed"); } if me.dependencies.is_some() { - bail!("virtual manifests do not specify [dependencies]"); + bail!("this virtual manifest specifies a [dependencies] section, which is not allowed"); } if me.dev_dependencies.is_some() || me.dev_dependencies2.is_some() { - bail!("virtual manifests do not specify [dev-dependencies]"); + bail!("this virtual manifest specifies a [dev-dependencies] section, which is not allowed"); } if me.build_dependencies.is_some() || me.build_dependencies2.is_some() { - bail!("virtual manifests do not specify [build-dependencies]"); + bail!("this virtual manifest specifies a [build-dependencies] section, which is not allowed"); } if me.features.is_some() { - bail!("virtual manifests do not specify [features]"); + bail!("this virtual manifest specifies a 
[features] section, which is not allowed"); } if me.target.is_some() { - bail!("virtual manifests do not specify [target]"); + bail!("this virtual manifest specifies a [target] section, which is not allowed"); } if me.badges.is_some() { - bail!("virtual manifests do not specify [badges]"); + bail!("this virtual manifest specifies a [badges] section, which is not allowed"); } let mut nested_paths = Vec::new(); @@ -1345,19 +1398,40 @@ if let Some(profiles) = &profiles { profiles.validate(&features, &mut warnings)?; } + if me + .workspace + .as_ref() + .map_or(false, |ws| ws.resolver.is_some()) + { + features.require(Feature::resolver())?; + } + let resolve_behavior = me + .workspace + .as_ref() + .and_then(|ws| ws.resolver.as_deref()) + .map(|r| ResolveBehavior::from_manifest(r)) + .transpose()?; let workspace_config = match me.workspace { Some(ref config) => WorkspaceConfig::Root(WorkspaceRootConfig::new( root, &config.members, &config.default_members, &config.exclude, + &config.metadata, )), None => { bail!("virtual manifests must be configured with [workspace]"); } }; Ok(( - VirtualManifest::new(replace, patch, workspace_config, profiles, features), + VirtualManifest::new( + replace, + patch, + workspace_config, + profiles, + features, + resolve_behavior, + ), nested_paths, )) } @@ -1438,11 +1512,12 @@ Some(StringOrBool::Bool(true)) => Some(build_rs), Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), None => { - match fs::metadata(&build_rs) { - // If there is a `build.rs` file next to the `Cargo.toml`, assume it is - // a build script. - Ok(ref e) if e.is_file() => Some(build_rs), - Ok(_) | Err(_) => None, + // If there is a `build.rs` file next to the `Cargo.toml`, assume it is + // a build script. + if build_rs.is_file() { + Some(build_rs) + } else { + None } } } @@ -1453,6 +1528,32 @@ } } +/// Returns the name of the README file for a `TomlProject`. +fn readme_for_project(package_root: &Path, project: &TomlProject) -> Option { + match &project.readme { + None => default_readme_from_package_root(package_root), + Some(value) => match value { + StringOrBool::Bool(false) => None, + StringOrBool::Bool(true) => Some("README.md".to_string()), + StringOrBool::String(v) => Some(v.clone()), + }, + } +} + +const DEFAULT_README_FILES: [&str; 3] = ["README.md", "README.txt", "README"]; + +/// Checks if a file with any of the default README file names exists in the package root. +/// If so, returns a `String` representing that name. +fn default_readme_from_package_root(package_root: &Path) -> Option { + for &readme_filename in DEFAULT_README_FILES.iter() { + if package_root.join(readme_filename).is_file() { + return Some(readme_filename.to_string()); + } + } + + None +} + /// Checks a list of build targets, and ensures the target names are unique within a vector. /// If not, the name of the offending build target is returned. fn unique_build_targets(targets: &[Target], package_root: &Path) -> Result<(), String> { @@ -1591,6 +1692,17 @@ .or_else(|| self.rev.clone().map(GitReference::Rev)) .unwrap_or_else(|| GitReference::Branch("master".to_string())); let loc = git.into_url()?; + + if let Some(fragment) = loc.fragment() { + let msg = format!( + "URL fragment `#{}` in git URL is ignored for dependency ({}). \ + If you were trying to specify a specific git revision, \ + use `rev = \"{}\"` in the dependency declaration.", + fragment, name_in_toml, fragment + ); + cx.warnings.push(msg) + } + SourceId::for_git(&loc, reference)? 
} (None, Some(path), _, _) => { @@ -1624,7 +1736,7 @@ None => (name_in_toml, None), }; - let version = self.version.as_ref().map(|v| &v[..]); + let version = self.version.as_deref(); let mut dep = match cx.pkgid { Some(id) => Dependency::parse(pkg_name, version, new_source_id, id, cx.config)?, None => Dependency::parse_no_deprecated(pkg_name, version, new_source_id)?, @@ -1686,9 +1798,9 @@ doc: Option, plugin: Option, #[serde(rename = "proc-macro")] - proc_macro: Option, + proc_macro_raw: Option, #[serde(rename = "proc_macro")] - proc_macro2: Option, + proc_macro_raw2: Option, harness: Option, #[serde(rename = "required-features")] required_features: Option>, @@ -1743,7 +1855,7 @@ } fn proc_macro(&self) -> Option { - self.proc_macro.or(self.proc_macro2).or_else(|| { + self.proc_macro_raw.or(self.proc_macro_raw2).or_else(|| { if let Some(types) = self.crate_types() { if types.contains(&"proc-macro".to_string()) { return Some(true); diff -Nru cargo-0.44.1/src/cargo/util/toml/targets.rs cargo-0.47.0/src/cargo/util/toml/targets.rs --- cargo-0.44.1/src/cargo/util/toml/targets.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/toml/targets.rs 2020-07-17 20:39:39.000000000 +0000 @@ -15,11 +15,13 @@ use std::path::{Path, PathBuf}; use super::{ - LibKind, PathValue, StringOrBool, StringOrVec, TomlBenchTarget, TomlBinTarget, - TomlExampleTarget, TomlLibTarget, TomlManifest, TomlTarget, TomlTestTarget, + PathValue, StringOrBool, StringOrVec, TomlBenchTarget, TomlBinTarget, TomlExampleTarget, + TomlLibTarget, TomlManifest, TomlTarget, TomlTestTarget, }; -use crate::core::{compiler, Edition, Feature, Features, Target}; +use crate::core::compiler::CrateType; +use crate::core::{Edition, Feature, Features, Target}; use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::restricted_names; pub fn targets( features: &Features, @@ -171,7 +173,7 @@ None => return Ok(None), }; - validate_has_name(lib, "library", "lib")?; + validate_target_name(lib, "library", "lib", warnings)?; let path = match (lib.path.as_ref(), inferred) { (Some(path), _) => package_root.join(&path.0), @@ -221,15 +223,15 @@ if kinds.len() > 1 { anyhow::bail!("cannot mix `proc-macro` crate type with others"); } - vec![LibKind::ProcMacro] + vec![CrateType::ProcMacro] } (_, Some(true), Some(true)) => { anyhow::bail!("`lib.plugin` and `lib.proc-macro` cannot both be `true`") } (Some(kinds), _, _) => kinds.iter().map(|s| s.into()).collect(), - (None, Some(true), _) => vec![LibKind::Dylib], - (None, _, Some(true)) => vec![LibKind::ProcMacro], - (None, _, _) => vec![LibKind::Lib], + (None, Some(true), _) => vec![CrateType::Dylib], + (None, _, Some(true)) => vec![CrateType::ProcMacro], + (None, _, _) => vec![CrateType::Lib], }; let mut target = Target::lib_target(&lib.name(), crate_types, path, edition); @@ -263,7 +265,7 @@ ); for bin in &bins { - validate_has_name(bin, "binary", "bin")?; + validate_target_name(bin, "binary", "bin", warnings)?; let name = bin.name(); @@ -286,7 +288,7 @@ )); } - if compiler::is_bad_artifact_name(&name) { + if restricted_names::is_conflicting_artifact_name(&name) { anyhow::bail!("the binary target name `{}` is forbidden", name) } } @@ -528,7 +530,7 @@ ); for target in &toml_targets { - validate_has_name(target, target_kind_human, target_kind)?; + validate_target_name(target, target_kind_human, target_kind, warnings)?; } validate_unique_names(&toml_targets, target_kind)?; @@ -556,7 +558,7 @@ fn inferred_lib(package_root: &Path) -> Option { let lib = 
package_root.join("src").join("lib.rs"); - if fs::metadata(&lib).is_ok() { + if lib.exists() { Some(lib) } else { None @@ -719,16 +721,24 @@ .collect() } -fn validate_has_name( +fn validate_target_name( target: &TomlTarget, target_kind_human: &str, target_kind: &str, + warnings: &mut Vec, ) -> CargoResult<()> { match target.name { Some(ref name) => { if name.trim().is_empty() { anyhow::bail!("{} target names cannot be empty", target_kind_human) } + if cfg!(windows) && restricted_names::is_windows_reserved(name) { + warnings.push(format!( + "{} target `{}` is a reserved Windows filename, \ + this target will not work on Windows platforms", + target_kind_human, name + )); + } } None => anyhow::bail!( "{} target {}.name is required", @@ -764,7 +774,7 @@ .set_doctest(toml.doctest.unwrap_or_else(|| t2.doctested())) .set_benched(toml.bench.unwrap_or_else(|| t2.benched())) .set_harness(toml.harness.unwrap_or_else(|| t2.harness())) - .set_proc_macro(toml.proc_macro.unwrap_or_else(|| t2.proc_macro())) + .set_proc_macro(toml.proc_macro().unwrap_or_else(|| t2.proc_macro())) .set_for_host(match (toml.plugin, toml.proc_macro()) { (None, None) => t2.for_host(), (Some(true), _) | (_, Some(true)) => true, diff -Nru cargo-0.44.1/src/cargo/util/workspace.rs cargo-0.47.0/src/cargo/util/workspace.rs --- cargo-0.44.1/src/cargo/util/workspace.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/cargo/util/workspace.rs 2020-07-17 20:39:39.000000000 +0000 @@ -7,7 +7,7 @@ fn get_available_targets<'a>( filter_fn: fn(&Target) -> bool, ws: &'a Workspace<'_>, - options: &'a CompileOptions<'_>, + options: &'a CompileOptions, ) -> CargoResult> { let packages = options.spec.get_packages(ws)?; @@ -29,7 +29,7 @@ fn print_available( filter_fn: fn(&Target) -> bool, ws: &Workspace<'_>, - options: &CompileOptions<'_>, + options: &CompileOptions, option_name: &str, plural_name: &str, ) -> CargoResult<()> { @@ -49,27 +49,18 @@ bail!("{}", output) } -pub fn print_available_examples( - ws: &Workspace<'_>, - options: &CompileOptions<'_>, -) -> CargoResult<()> { +pub fn print_available_examples(ws: &Workspace<'_>, options: &CompileOptions) -> CargoResult<()> { print_available(Target::is_example, ws, options, "--example", "examples") } -pub fn print_available_binaries( - ws: &Workspace<'_>, - options: &CompileOptions<'_>, -) -> CargoResult<()> { +pub fn print_available_binaries(ws: &Workspace<'_>, options: &CompileOptions) -> CargoResult<()> { print_available(Target::is_bin, ws, options, "--bin", "binaries") } -pub fn print_available_benches( - ws: &Workspace<'_>, - options: &CompileOptions<'_>, -) -> CargoResult<()> { +pub fn print_available_benches(ws: &Workspace<'_>, options: &CompileOptions) -> CargoResult<()> { print_available(Target::is_bench, ws, options, "--bench", "benches") } -pub fn print_available_tests(ws: &Workspace<'_>, options: &CompileOptions<'_>) -> CargoResult<()> { +pub fn print_available_tests(ws: &Workspace<'_>, options: &CompileOptions) -> CargoResult<()> { print_available(Target::is_test, ws, options, "--test", "tests") } diff -Nru cargo-0.44.1/src/doc/asciidoc-extension.rb cargo-0.47.0/src/doc/asciidoc-extension.rb --- cargo-0.44.1/src/doc/asciidoc-extension.rb 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/asciidoc-extension.rb 2020-07-17 20:39:39.000000000 +0000 @@ -84,6 +84,20 @@ end end +# Man pages are ASCII only. Unfortunately asciidoc doesn't process these +# characters for us. The `cargo tree` manpage needs a little assistance. 
+class SpecialCharPostprocessor < Extensions::Postprocessor + def process document, output + if document.basebackend? 'manpage' + output = output.gsub(/│/, '|') + .gsub(/├/, '|') + .gsub(/└/, '`') + .gsub(/─/, '\-') + end + output + end +end + # General utility for converting text. Example: # # convert:lowercase[{somevar}] @@ -107,4 +121,5 @@ inline_macro LinkCargoInlineMacro inline_macro ConvertInlineMacro postprocessor MonoPostprocessor + postprocessor SpecialCharPostprocessor end diff -Nru cargo-0.44.1/src/doc/man/cargo.adoc cargo-0.47.0/src/doc/man/cargo.adoc --- cargo-0.44.1/src/doc/man/cargo.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -71,6 +71,9 @@ man:cargo-pkgid[1]:: Print a fully qualified package specification. +man:cargo-tree[1]:: + Display a tree visualization of a dependency graph. + man:cargo-update[1]:: Update dependencies as recorded in the local lock file. @@ -166,16 +169,16 @@ Binaries installed by man:cargo-install[1] will be located here. If using rustup, executables distributed with Rust are also located here. -`$CARGO_HOME/config`:: +`$CARGO_HOME/config.toml`:: The global configuration file. See linkcargo:reference/config.html[the reference] for more information about configuration files. -`.cargo/config`:: - Cargo automatically searches for a file named `.cargo/config` in the +`.cargo/config.toml`:: + Cargo automatically searches for a file named `.cargo/config.toml` in the current directory, and all parent directories. These configuration files will be merged with the global configuration file. -`$CARGO_HOME/credentials`:: +`$CARGO_HOME/credentials.toml`:: Private authentication information for logging in to a registry. `$CARGO_HOME/registry/`:: diff -Nru cargo-0.44.1/src/doc/man/cargo-bench.adoc cargo-0.47.0/src/doc/man/cargo-bench.adoc --- cargo-0.44.1/src/doc/man/cargo-bench.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-bench.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -36,6 +36,16 @@ manifest settings, in which case your code will need to provide its own `main` function to handle running benchmarks. + +> **Note**: The +> link:https://doc.rust-lang.org/nightly/unstable-book/library-features/test.html[`#[bench\]` attribute] +> is currently unstable and only available on the +> link:https://doc.rust-lang.org/book/appendix-07-nightly-rust.html[nightly channel]. +> There are some packages available on +> link:https://crates.io/keywords/benchmark[crates.io] that may help with +> running benchmarks on the stable channel, such as +> link:https://crates.io/crates/criterion[Criterion]. + == OPTIONS === Benchmark Options diff -Nru cargo-0.44.1/src/doc/man/cargo-install.adoc cargo-0.47.0/src/doc/man/cargo-install.adoc --- cargo-0.44.1/src/doc/man/cargo-install.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-install.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -135,12 +135,16 @@ include::options-registry.adoc[] +include::options-index.adoc[] + include::options-features.adoc[] === Compilation Options include::options-target-triple.adoc[] +include::options-target-dir.adoc[] + *--debug*:: Build with the `dev` profile instead the `release` profile. 
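The `cargo bench` note added in the cargo-bench.adoc hunk above points to Criterion as a way to benchmark on the stable channel. A minimal, hypothetical sketch of such a benchmark (function and benchmark names are invented here; it assumes `criterion` is listed under `[dev-dependencies]` and the bench file is registered with a `[[bench]]` entry using `harness = false`, per Criterion's own documentation):

----
// Hypothetical stable-channel benchmark using Criterion (names invented here).
use criterion::{black_box, criterion_group, criterion_main, Criterion};

// The code being measured; any ordinary function works.
fn fibonacci(n: u64) -> u64 {
    match n {
        0 | 1 => 1,
        n => fibonacci(n - 1) + fibonacci(n - 2),
    }
}

fn bench_fib(c: &mut Criterion) {
    // `black_box` keeps the compiler from optimizing the call away.
    c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20))));
}

criterion_group!(benches, bench_fib);
criterion_main!(benches);
----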
diff -Nru cargo-0.44.1/src/doc/man/cargo-login.adoc cargo-0.47.0/src/doc/man/cargo-login.adoc --- cargo-0.44.1/src/doc/man/cargo-login.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-login.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -14,7 +14,7 @@ This command will save the API token to disk so that commands that require authentication, such as man:cargo-publish[1], will be automatically -authenticated. The token is saved in `$CARGO_HOME/credentials`. `CARGO_HOME` +authenticated. The token is saved in `$CARGO_HOME/credentials.toml`. `CARGO_HOME` defaults to `.cargo` in your home directory. If the _TOKEN_ argument is not specified, it will be read from stdin. diff -Nru cargo-0.44.1/src/doc/man/cargo-metadata.adoc cargo-0.47.0/src/doc/man/cargo-metadata.adoc --- cargo-0.44.1/src/doc/man/cargo-metadata.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-metadata.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -268,6 +268,15 @@ "version": 1, /* The absolute path to the root of the workspace. */ "workspace_root": "/path/to/my-package" + /* Workspace metadata. + This is null if no metadata is specified. */ + "metadata": { + "docs": { + "rs": { + "all-features": true + } + } + } } ---- diff -Nru cargo-0.44.1/src/doc/man/cargo-tree.adoc cargo-0.47.0/src/doc/man/cargo-tree.adoc --- cargo-0.44.1/src/doc/man/cargo-tree.adoc 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-tree.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,227 @@ += cargo-tree(1) +:idprefix: cargo_tree_ +:doctype: manpage +:actionverb: Display +:noall: true + +== NAME + +cargo-tree - Display a tree visualization of a dependency graph + +== SYNOPSIS + +`cargo tree [_OPTIONS_]` + +== DESCRIPTION + +This command will display a tree of dependencies to the terminal. An example +of a simple project that depends on the "rand" package: + +---- +myproject v0.1.0 (/myproject) +└── rand v0.7.3 + ├── getrandom v0.1.14 + │ ├── cfg-if v0.1.10 + │ └── libc v0.2.68 + ├── libc v0.2.68 (*) + ├── rand_chacha v0.2.2 + │ ├── ppv-lite86 v0.2.6 + │ └── rand_core v0.5.1 + │ └── getrandom v0.1.14 (*) + └── rand_core v0.5.1 (*) +[build-dependencies] +└── cc v1.0.50 +---- + +Packages marked with `(*)` have been "de-duplicated". The dependencies for the +package have already been shown elswhere in the graph, and so are not +repeated. Use the `--no-dedupe` option to repeat the duplicates. + +The `-e` flag can be used to select the dependency kinds to display. The +"features" kind changes the output to display the features enabled by +each dependency. For example, `cargo tree -e features`: + +---- +myproject v0.1.0 (/myproject) +└── log feature "serde" + └── log v0.4.8 + ├── serde v1.0.106 + └── cfg-if feature "default" + └── cfg-if v0.1.10 +---- + +In this tree, `myproject` depends on `log` with the `serde` feature. `log` in +turn depends on `cfg-if` with "default" features. When using `-e features` it +can be helpful to use `-i` flag to show how the features flow into a package. +See the examples below for more detail. + +== OPTIONS + +=== Tree Options + +*-i* _SPEC_:: +*--invert* _SPEC_:: + Show the reverse dependencies for the given package. This flag will invert + the tree and display the packages that depend on the given package. ++ +Note that in a workspace, by default it will only display the package's +reverse dependencies inside the tree of the workspace member in the current +directory. 
The `--workspace` flag can be used to extend it so that it will +show the package's reverse dependencies across the entire workspace. The `-p` +flag can be used to display the package's reverse dependencies only with the +subtree of the package given to `-p`. + +*--no-dedupe*:: + Do not de-duplicate repeated dependencies. Usually, when a package has + already displayed its dependencies, further occurrences will not + re-display its dependencies, and will include a `(*)` to indicate it has + already been shown. This flag will cause those duplicates to be repeated. + +*-d*:: +*--duplicates*:: + Show only dependencies which come in multiple versions (implies + `--invert`). When used with the `-p` flag, only shows duplicates within + the subtree of the given package. ++ +It can be beneficial for build times and executable sizes to avoid building +that same package multiple times. This flag can help identify the offending +packages. You can then investigate if the package that depends on the +duplicate with the older version can be updated to the newer version so that +only one instance is built. + +*-e* _KINDS_:: +*--edges* _KINDS_:: + The dependency kinds to display. Takes a comma separated list of values: ++ + - `all` — Show all edge kinds. + - `normal` — Show normal dependencies. + - `build` — Show build dependencies. + - `dev` — Show development dependencies. + - `features` — Show features enabled by each dependency. If this is + the only kind given, then it will automatically include the other + dependency kinds. + - `no-normal` — Do not include normal dependencies. + - `no-build` — Do not include build dependencies. + - `no-dev` — Do not include development dependencies. ++ +The `no-` prefixed options cannot be mixed with the other dependency kinds. ++ +The default is `normal,build,dev`. + +*--target* _TRIPLE_:: + Filter dependencies matching the given target-triple. + The default is the host platform. Use the value `all` to include *all* + targets. + +=== Tree Formatting Options + +*--charset* _CHARSET_:: + Chooses the character set to use for the tree. Valid values are "utf8" or + "ascii". Default is "utf8". + +*-f* _FORMAT_:: +*--format* _FORMAT_:: + Set the format string for each package. The default is "{p}". ++ +This is an arbitrary string which will be used to display each package. The following +strings will be replaced with the corresponding value: ++ +- `{p}` — The package name. +- `{l}` — The package license. +- `{r}` — The package repository URL. +- `{f}` — Comma-separated list of package features that are enabled. + +*--prefix* _PREFIX_:: + Sets how each line is displayed. The _PREFIX_ value can be one of: ++ +- `indent` (default) — Shows each line indented as a tree. +- `depth` — Show as a list, with the numeric depth printed before each entry. +- `none` — Show as a flat list. + +=== Package Selection + +include::options-packages.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-features.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::options-locked.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Display the tree for the package in the current directory: + + cargo tree + +. Display all the packages that depend on the `syn` package: + + cargo tree -i syn + +. Show the features enabled on each package: + + cargo tree --format "{p} {f}" + +. Show all packages that are built multiple times. 
This can happen if multiple + semver-incompatible versions appear in the tree (like 1.0.0 and 2.0.0). + + cargo tree -d + +. Explain why features are enabled for the `syn` package: + + cargo tree -e features -i syn ++ +The `-e features` flag is used to show features. The `-i` flag is used to +invert the graph so that it displays the packages that depend on `syn`. An +example of what this would display: ++ +---- +syn v1.0.17 +├── syn feature "clone-impls" +│ └── syn feature "default" +│ └── rustversion v1.0.2 +│ └── rustversion feature "default" +│ └── myproject v0.1.0 (/myproject) +│ └── myproject feature "default" (command-line) +├── syn feature "default" (*) +├── syn feature "derive" +│ └── syn feature "default" (*) +├── syn feature "full" +│ └── rustversion v1.0.2 (*) +├── syn feature "parsing" +│ └── syn feature "default" (*) +├── syn feature "printing" +│ └── syn feature "default" (*) +├── syn feature "proc-macro" +│ └── syn feature "default" (*) +└── syn feature "quote" + ├── syn feature "printing" (*) + └── syn feature "proc-macro" (*) +---- ++ +To read this graph, you can follow the chain for each feature from the root to +see why it is included. For example, the "full" feature is added by the +`rustversion` crate which is included from `myproject` (with the default +features), and `myproject` is the package selected on the command-line. All +of the other `syn` features are added by the "default" feature ("quote" is +added by "printing" and "proc-macro", both of which are default features). ++ +If you're having difficulty cross-referencing the de-duplicated `(*)` entries, +try with the `--no-dedupe` flag to get the full output. + +== SEE ALSO +man:cargo[1], man:cargo-metadata[1] diff -Nru cargo-0.44.1/src/doc/man/cargo-vendor.adoc cargo-0.47.0/src/doc/man/cargo-vendor.adoc --- cargo-0.44.1/src/doc/man/cargo-vendor.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-vendor.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -19,11 +19,11 @@ specified with the `-s` option. The `cargo vendor` command will also print out the configuration necessary -to use the vendored sources, which you will need to add to `.cargo/config`. +to use the vendored sources, which you will need to add to `.cargo/config.toml`. == OPTIONS -=== Owner Options +=== Vendor Options *-s* _MANIFEST_:: *--sync* _MANIFEST_:: @@ -35,7 +35,7 @@ existing contents of the vendor directory *--respect-source-config*:: - Instead of ignoring `[source]` configuration by default in `.cargo/config` + Instead of ignoring `[source]` configuration by default in `.cargo/config.toml` read it and use it when downloading crates from crates.io, for example *--versioned-dirs*:: diff -Nru cargo-0.44.1/src/doc/man/cargo-yank.adoc cargo-0.47.0/src/doc/man/cargo-yank.adoc --- cargo-0.44.1/src/doc/man/cargo-yank.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/cargo-yank.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -28,7 +28,7 @@ == OPTIONS -=== Owner Options +=== Yank Options *--vers* _VERSION_:: The version to yank or un-yank. diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-bench.html cargo-0.47.0/src/doc/man/generated/cargo-bench.html --- cargo-0.44.1/src/doc/man/generated/cargo-bench.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-bench.html 2020-07-17 20:39:39.000000000 +0000 @@ -42,6 +42,20 @@ manifest settings, in which case your code will need to provide its own main function to handle running benchmarks.

+
+
+
+

Note: The +#[bench] attribute +is currently unstable and only available on the +nightly channel. +There are some packages available on +crates.io that may help with +running benchmarks on the stable channel, such as +Criterion.

+
+
+
@@ -405,6 +419,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-build.html cargo-0.47.0/src/doc/man/generated/cargo-build.html --- cargo-0.44.1/src/doc/man/generated/cargo-build.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-build.html 2020-07-17 20:39:39.000000000 +0000 @@ -350,6 +350,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-check.html cargo-0.47.0/src/doc/man/generated/cargo-check.html --- cargo-0.44.1/src/doc/man/generated/cargo-check.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-check.html 2020-07-17 20:39:39.000000000 +0000 @@ -341,6 +341,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-clean.html cargo-0.47.0/src/doc/man/generated/cargo-clean.html --- cargo-0.44.1/src/doc/man/generated/cargo-clean.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-clean.html 2020-07-17 20:39:39.000000000 +0000 @@ -170,6 +170,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-doc.html cargo-0.47.0/src/doc/man/generated/cargo-doc.html --- cargo-0.44.1/src/doc/man/generated/cargo-doc.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-doc.html 2020-07-17 20:39:39.000000000 +0000 @@ -311,6 +311,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-fetch.html cargo-0.47.0/src/doc/man/generated/cargo-fetch.html --- cargo-0.44.1/src/doc/man/generated/cargo-fetch.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-fetch.html 2020-07-17 20:39:39.000000000 +0000 @@ -147,6 +147,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-fix.html cargo-0.47.0/src/doc/man/generated/cargo-fix.html --- cargo-0.44.1/src/doc/man/generated/cargo-fix.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-fix.html 2020-07-17 20:39:39.000000000 +0000 @@ -417,6 +417,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-generate-lockfile.html cargo-0.47.0/src/doc/man/generated/cargo-generate-lockfile.html --- cargo-0.44.1/src/doc/man/generated/cargo-generate-lockfile.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-generate-lockfile.html 2020-07-17 20:39:39.000000000 +0000 @@ -115,6 +115,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo.html cargo-0.47.0/src/doc/man/generated/cargo.html --- cargo-0.44.1/src/doc/man/generated/cargo.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo.html 2020-07-17 20:39:39.000000000 +0000 @@ -98,6 +98,10 @@

Print a fully qualified package specification.

+
cargo-tree(1)
+
+

Display a tree visualization of a dependency graph.

+
cargo-update(1)

Update dependencies as recorded in the local lock file.

@@ -293,6 +297,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
@@ -350,18 +362,18 @@

Binaries installed by cargo-install(1) will be located here. If using rustup, executables distributed with Rust are also located here.

-
$CARGO_HOME/config
+
$CARGO_HOME/config.toml

The global configuration file. See the reference for more information about configuration files.

-
.cargo/config
+
.cargo/config.toml
-

Cargo automatically searches for a file named .cargo/config in the +

Cargo automatically searches for a file named .cargo/config.toml in the current directory, and all parent directories. These configuration files will be merged with the global configuration file.

-
$CARGO_HOME/credentials
+
$CARGO_HOME/credentials.toml

Private authentication information for logging in to a registry.

diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-init.html cargo-0.47.0/src/doc/man/generated/cargo-init.html --- cargo-0.44.1/src/doc/man/generated/cargo-init.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-init.html 2020-07-17 20:39:39.000000000 +0000 @@ -187,6 +187,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-install.html cargo-0.47.0/src/doc/man/generated/cargo-install.html --- cargo-0.44.1/src/doc/man/generated/cargo-install.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-install.html 2020-07-17 20:39:39.000000000 +0000 @@ -197,6 +197,10 @@ If not specified, the default registry is used, which is defined by the registry.default config key which defaults to crates-io.

+
--index INDEX
+
+

The URL of the registry index to use.

+
@@ -254,6 +258,13 @@ build cache documentation for more details.

+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
--debug

Build with the dev profile instead of the release profile.

@@ -358,6 +369,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-locate-project.html cargo-0.47.0/src/doc/man/generated/cargo-locate-project.html --- cargo-0.44.1/src/doc/man/generated/cargo-locate-project.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-locate-project.html 2020-07-17 20:39:39.000000000 +0000 @@ -84,6 +84,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-login.html cargo-0.47.0/src/doc/man/generated/cargo-login.html --- cargo-0.44.1/src/doc/man/generated/cargo-login.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-login.html 2020-07-17 20:39:39.000000000 +0000 @@ -16,7 +16,7 @@

This command will save the API token to disk so that commands that require authentication, such as cargo-publish(1), will be automatically -authenticated. The token is saved in $CARGO_HOME/credentials. CARGO_HOME +authenticated. The token is saved in $CARGO_HOME/credentials.toml. CARGO_HOME defaults to .cargo in your home directory.

@@ -92,6 +92,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-metadata.html cargo-0.47.0/src/doc/man/generated/cargo-metadata.html --- cargo-0.44.1/src/doc/man/generated/cargo-metadata.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-metadata.html 2020-07-17 20:39:39.000000000 +0000 @@ -275,6 +275,15 @@ "version": 1, /* The absolute path to the root of the workspace. */ "workspace_root": "/path/to/my-package" + /* Workspace metadata. + This is null if no metadata is specified. */ + "metadata": { + "docs": { + "rs": { + "all-features": true + } + } + } }
@@ -432,6 +441,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-new.html cargo-0.47.0/src/doc/man/generated/cargo-new.html --- cargo-0.44.1/src/doc/man/generated/cargo-new.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-new.html 2020-07-17 20:39:39.000000000 +0000 @@ -180,6 +180,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-owner.html cargo-0.47.0/src/doc/man/generated/cargo-owner.html --- cargo-0.44.1/src/doc/man/generated/cargo-owner.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-owner.html 2020-07-17 20:39:39.000000000 +0000 @@ -128,6 +128,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-package.html cargo-0.47.0/src/doc/man/generated/cargo-package.html --- cargo-0.44.1/src/doc/man/generated/cargo-package.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-package.html 2020-07-17 20:39:39.000000000 +0000 @@ -278,6 +278,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-pkgid.html cargo-0.47.0/src/doc/man/generated/cargo-pkgid.html --- cargo-0.44.1/src/doc/man/generated/cargo-pkgid.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-pkgid.html 2020-07-17 20:39:39.000000000 +0000 @@ -174,6 +174,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-publish.html cargo-0.47.0/src/doc/man/generated/cargo-publish.html --- cargo-0.44.1/src/doc/man/generated/cargo-publish.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-publish.html 2020-07-17 20:39:39.000000000 +0000 @@ -262,6 +262,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-run.html cargo-0.47.0/src/doc/man/generated/cargo-run.html --- cargo-0.44.1/src/doc/man/generated/cargo-run.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-run.html 2020-07-17 20:39:39.000000000 +0000 @@ -263,6 +263,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-rustc.html cargo-0.47.0/src/doc/man/generated/cargo-rustc.html --- cargo-0.44.1/src/doc/man/generated/cargo-rustc.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-rustc.html 2020-07-17 20:39:39.000000000 +0000 @@ -324,6 +324,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-rustdoc.html cargo-0.47.0/src/doc/man/generated/cargo-rustdoc.html --- cargo-0.44.1/src/doc/man/generated/cargo-rustdoc.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-rustdoc.html 2020-07-17 20:39:39.000000000 +0000 @@ -339,6 +339,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-search.html cargo-0.47.0/src/doc/man/generated/cargo-search.html --- cargo-0.44.1/src/doc/man/generated/cargo-search.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-search.html 2020-07-17 20:39:39.000000000 +0000 @@ -90,6 +90,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-test.html cargo-0.47.0/src/doc/man/generated/cargo-test.html --- cargo-0.44.1/src/doc/man/generated/cargo-test.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-test.html 2020-07-17 20:39:39.000000000 +0000 @@ -439,6 +439,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-tree.html cargo-0.47.0/src/doc/man/generated/cargo-tree.html --- cargo-0.44.1/src/doc/man/generated/cargo-tree.html 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-tree.html 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,516 @@ +

NAME

+
+

cargo-tree - Display a tree visualization of a dependency graph

+
+
+

SYNOPSIS

+
+
+

cargo tree [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will display a tree of dependencies to the terminal. An example +of a simple project that depends on the "rand" package:

+
+
+
+
myproject v0.1.0 (/myproject)
+└── rand v0.7.3
+    ├── getrandom v0.1.14
+    │   ├── cfg-if v0.1.10
+    │   └── libc v0.2.68
+    ├── libc v0.2.68 (*)
+    ├── rand_chacha v0.2.2
+    │   ├── ppv-lite86 v0.2.6
+    │   └── rand_core v0.5.1
+    │       └── getrandom v0.1.14 (*)
+    └── rand_core v0.5.1 (*)
+[build-dependencies]
+└── cc v1.0.50
+
+
+
+

Packages marked with (*) have been "de-duplicated". The dependencies for the +package have already been shown elsewhere in the graph, and so are not +repeated. Use the --no-dedupe option to repeat the duplicates.

+
+
+

The -e flag can be used to select the dependency kinds to display. The +"features" kind changes the output to display the features enabled by +each dependency. For example, cargo tree -e features:

+
+
+
+
myproject v0.1.0 (/myproject)
+└── log feature "serde"
+    └── log v0.4.8
+        ├── serde v1.0.106
+        └── cfg-if feature "default"
+            └── cfg-if v0.1.10
+
+
+
+

In this tree, myproject depends on log with the serde feature. log in +turn depends on cfg-if with "default" features. When using -e features it +can be helpful to use the -i flag to show how the features flow into a package. +See the examples below for more detail.

+
+
+
+
+

OPTIONS

+
+
+

Tree Options

+
+
+
-i SPEC
+
--invert SPEC
+
+

Show the reverse dependencies for the given package. This flag will invert +the tree and display the packages that depend on the given package.

+
+

Note that in a workspace, by default it will only display the package’s +reverse dependencies inside the tree of the workspace member in the current +directory. The --workspace flag can be used to extend it so that it will +show the package’s reverse dependencies across the entire workspace. The -p +flag can be used to display the package’s reverse dependencies only with the +subtree of the package given to -p.

+
+
+
--no-dedupe
+
+

Do not de-duplicate repeated dependencies. Usually, when a package has +already displayed its dependencies, further occurrences will not +re-display its dependencies, and will include a (*) to indicate it has +already been shown. This flag will cause those duplicates to be repeated.

+
+
-d
+
--duplicates
+
+

Show only dependencies which come in multiple versions (implies +--invert). When used with the -p flag, only shows duplicates within +the subtree of the given package.

+
+

It can be beneficial for build times and executable sizes to avoid building +that same package multiple times. This flag can help identify the offending +packages. You can then investigate if the package that depends on the +duplicate with the older version can be updated to the newer version so that +only one instance is built.

+
+
+
-e KINDS
+
--edges KINDS
+
+

The dependency kinds to display. Takes a comma separated list of values:

+
+
    +
  • +

    all — Show all edge kinds.

    +
  • +
  • +

    normal — Show normal dependencies.

    +
  • +
  • +

    build — Show build dependencies.

    +
  • +
  • +

    dev — Show development dependencies.

    +
  • +
  • +

    features — Show features enabled by each dependency. If this is +the only kind given, then it will automatically include the other +dependency kinds.

    +
  • +
  • +

    no-normal — Do not include normal dependencies.

    +
  • +
  • +

    no-build — Do not include build dependencies.

    +
  • +
  • +

    no-dev — Do not include development dependencies.

    +
  • +
+
+
+

The no- prefixed options cannot be mixed with the other dependency kinds.

+
+
+

The default is normal,build,dev.

+
+
+
--target TRIPLE
+
+

Filter dependencies matching the given target-triple. +The default is the host platform. Use the value all to include all +targets.

+
+
+
+
+
+

Tree Formatting Options

+
+
+
--charset CHARSET
+
+

Chooses the character set to use for the tree. Valid values are "utf8" or +"ascii". Default is "utf8".

+
+
-f FORMAT
+
--format FORMAT
+
+

Set the format string for each package. The default is "{p}".

+
+

This is an arbitrary string which will be used to display each package. The following +strings will be replaced with the corresponding value:

+
+
+
    +
  • +

    {p} — The package name.

    +
  • +
  • +

    {l} — The package license.

    +
  • +
  • +

    {r} — The package repository URL.

    +
  • +
  • +

    {f} — Comma-separated list of package features that are enabled.

    +
  • +
+
+
+
--prefix PREFIX
+
+

Sets how each line is displayed. The PREFIX value can be one of:

+
+
    +
  • +

    indent (default) — Shows each line indented as a tree.

    +
  • +
  • +

    depth — Show as a list, with the numeric depth printed before each entry.

    +
  • +
  • +

    none — Show as a flat list.

    +
  • +
+
+
+
+
+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected +depend on the selected manifest file (based on the current working directory if +--manifest-path is not given). If the manifest is the root of a workspace then +the workspace's default members are selected, otherwise only the package defined +by the manifest will be selected.

+
+
+

The default members of a workspace can be set explicitly with the +workspace.default-members key in the root manifest. If this is not set, a +virtual workspace will include all workspace members (equivalent to passing +--workspace), and a non-virtual workspace will include only the root crate itself.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Display only the specified packages. See cargo-pkgid(1) for the +SPEC format. This flag may be specified multiple times.

+
+
--workspace
+
+

Display all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the +--workspace flag. This flag may be specified multiple times.

+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches for the +Cargo.toml file in the current directory or any parent directory.

+
+
+
+
+
+

Feature Selection

+
+

The feature flags allow you to control the enabled features for the "current" +package. The "current" package is the package in the current directory, or the +one specified in --manifest-path. If running in the root of a virtual +workspace, then the default features are selected for all workspace members, +or all features if --all-features is specified.

+
+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax. This flag may be +specified multiple times, which enables all specified features.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

+
+
    +
  • +

    auto (default): Automatically detect if color support is available on the +terminal.

    +
  • +
  • +

    always: Always display colors.

    +
  • +
  • +

    never: Never display colors.

    +
  • +
+
+
+

May also be specified with the term.color +config value.

+
+
+
+
+
+
+

Common Options

+
+
+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo +begins with +, it will be interpreted as a rustup toolchain name (such +as +stable or +nightly). +See the rustup documentation +for more information about how toolchain overrides work.

-h
--help

Prints help information.

-Z FLAG…

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

--frozen
--locked

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.
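
For example, a CI step that should fail if Cargo.lock is out-of-date might run:

    cargo tree --locked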

--offline

Prevents Cargo from accessing the network for any reason. Without this flag, Cargo will stop with an error if it needs to access the network and the network is not available. With this flag, Cargo will attempt to proceed without the network if possible.

Beware that this may result in different dependency resolution than online mode. Cargo will restrict itself to crates that are downloaded locally, even if there might be a newer version as indicated in the local copy of the index. See the cargo-fetch(1) command to download dependencies before going offline.
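
A sketch of the workflow this enables, fetching while online and then inspecting the graph offline:

    cargo fetch
    cargo tree --offline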

May also be specified with the net.offline config value.


ENVIRONMENT

See the reference for details on environment variables that Cargo reads.


Exit Status

0

Cargo succeeded.

101

Cargo failed to complete.


EXAMPLES

  1. Display the tree for the package in the current directory:

         cargo tree

  2. Display all the packages that depend on the syn package:

         cargo tree -i syn

  3. Show the features enabled on each package:

         cargo tree --format "{p} {f}"

  4. Show all packages that are built multiple times. This can happen if multiple semver-incompatible versions appear in the tree (like 1.0.0 and 2.0.0).

         cargo tree -d

  5. Explain why features are enabled for the syn package:

         cargo tree -e features -i syn

     The -e features flag is used to show features. The -i flag is used to invert the graph so that it displays the packages that depend on syn. An example of what this would display:

         syn v1.0.17
         ├── syn feature "clone-impls"
         │   └── syn feature "default"
         │       └── rustversion v1.0.2
         │           └── rustversion feature "default"
         │               └── myproject v0.1.0 (/myproject)
         │                   └── myproject feature "default" (command-line)
         ├── syn feature "default" (*)
         ├── syn feature "derive"
         │   └── syn feature "default" (*)
         ├── syn feature "full"
         │   └── rustversion v1.0.2 (*)
         ├── syn feature "parsing"
         │   └── syn feature "default" (*)
         ├── syn feature "printing"
         │   └── syn feature "default" (*)
         ├── syn feature "proc-macro"
         │   └── syn feature "default" (*)
         └── syn feature "quote"
             ├── syn feature "printing" (*)
             └── syn feature "proc-macro" (*)

     To read this graph, you can follow the chain for each feature from the root to see why it is included. For example, the "full" feature is added by the rustversion crate which is included from myproject (with the default features), and myproject is the package selected on the command-line. All of the other syn features are added by the "default" feature ("quote" is added by "printing" and "proc-macro", both of which are default features).

     If you’re having difficulty cross-referencing the de-duplicated (*) entries, try with the --no-dedupe flag to get the full output.

SEE ALSO

\ No newline at end of file diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-uninstall.html cargo-0.47.0/src/doc/man/generated/cargo-uninstall.html --- cargo-0.44.1/src/doc/man/generated/cargo-uninstall.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-uninstall.html 2020-07-17 20:39:39.000000000 +0000 @@ -115,6 +115,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo begins with +, it will be interpreted as a rustup toolchain name (such as +stable or +nightly). See the rustup documentation for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-update.html cargo-0.47.0/src/doc/man/generated/cargo-update.html --- cargo-0.44.1/src/doc/man/generated/cargo-update.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-update.html 2020-07-17 20:39:39.000000000 +0000 @@ -149,6 +149,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo begins with +, it will be interpreted as a rustup toolchain name (such as +stable or +nightly). See the rustup documentation for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-vendor.html cargo-0.47.0/src/doc/man/generated/cargo-vendor.html --- cargo-0.44.1/src/doc/man/generated/cargo-vendor.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-vendor.html 2020-07-17 20:39:39.000000000 +0000 @@ -22,7 +22,7 @@

The cargo vendor command will also print out the configuration necessary -to use the vendored sources, which you will need to add to .cargo/config.

+to use the vendored sources, which you will need to add to .cargo/config.toml.

@@ -30,7 +30,7 @@

OPTIONS

-

Owner Options

+

Vendor Options

-s MANIFEST
@@ -46,7 +46,7 @@
--respect-source-config
-

Instead of ignoring [source] configuration by default in .cargo/config +

Instead of ignoring [source] configuration by default in .cargo/config.toml read it and use it when downloading crates from crates.io, for example

--versioned-dirs
@@ -118,6 +118,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo begins with +, it will be interpreted as a rustup toolchain name (such as +stable or +nightly). See the rustup documentation for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-verify-project.html cargo-0.47.0/src/doc/man/generated/cargo-verify-project.html --- cargo-0.44.1/src/doc/man/generated/cargo-verify-project.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-verify-project.html 2020-07-17 20:39:39.000000000 +0000 @@ -123,6 +123,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo begins with +, it will be interpreted as a rustup toolchain name (such as +stable or +nightly). See the rustup documentation for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/generated/cargo-yank.html cargo-0.47.0/src/doc/man/generated/cargo-yank.html --- cargo-0.44.1/src/doc/man/generated/cargo-yank.html 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/generated/cargo-yank.html 2020-07-17 20:39:39.000000000 +0000 @@ -37,7 +37,7 @@

OPTIONS

-

Owner Options

+

Yank Options

--vers VERSION
@@ -120,6 +120,14 @@

Common Options

+
+TOOLCHAIN
+
+

If Cargo has been installed with rustup, and the first argument to cargo begins with +, it will be interpreted as a rustup toolchain name (such as +stable or +nightly). See the rustup documentation for more information about how toolchain overrides work.

+
-h
--help
diff -Nru cargo-0.44.1/src/doc/man/options-common.adoc cargo-0.47.0/src/doc/man/options-common.adoc --- cargo-0.44.1/src/doc/man/options-common.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/options-common.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -1,3 +1,10 @@ +*+TOOLCHAIN*:: + If Cargo has been installed with rustup, and the first argument to `cargo` + begins with `+`, it will be interpreted as a rustup toolchain name (such + as `+stable` or `+nightly`). + See the link:https://github.com/rust-lang/rustup/[rustup documentation] + for more information about how toolchain overrides work. + *-h*:: *--help*:: Prints help information. diff -Nru cargo-0.44.1/src/doc/man/options-packages.adoc cargo-0.47.0/src/doc/man/options-packages.adoc --- cargo-0.44.1/src/doc/man/options-packages.adoc 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/man/options-packages.adoc 2020-07-17 20:39:39.000000000 +0000 @@ -17,8 +17,10 @@ *--workspace*:: {actionverb} all members in the workspace. +ifndef::noall[] *--all*:: Deprecated alias for `--workspace`. +endif::noall[] *--exclude* _SPEC_...:: Exclude the specified packages. Must be used in conjunction with the diff -Nru cargo-0.44.1/src/doc/src/commands/cargo-tree.md cargo-0.47.0/src/doc/src/commands/cargo-tree.md --- cargo-0.44.1/src/doc/src/commands/cargo-tree.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/doc/src/commands/cargo-tree.md 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,3 @@ +# cargo tree +{{#include command-common.html}} +{{#include ../../man/generated/cargo-tree.html}} diff -Nru cargo-0.44.1/src/doc/src/commands/manifest-commands.md cargo-0.47.0/src/doc/src/commands/manifest-commands.md --- cargo-0.44.1/src/doc/src/commands/manifest-commands.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/commands/manifest-commands.md 2020-07-17 20:39:39.000000000 +0000 @@ -3,6 +3,7 @@ * [cargo locate-project](cargo-locate-project.md) * [cargo metadata](cargo-metadata.md) * [cargo pkgid](cargo-pkgid.md) +* [cargo tree](cargo-tree.md) * [cargo update](cargo-update.md) * [cargo vendor](cargo-vendor.md) * [cargo verify-project](cargo-verify-project.md) diff -Nru cargo-0.44.1/src/doc/src/guide/cargo-home.md cargo-0.47.0/src/doc/src/guide/cargo-home.md --- cargo-0.44.1/src/doc/src/guide/cargo-home.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/guide/cargo-home.md 2020-07-17 20:39:39.000000000 +0000 @@ -12,10 +12,10 @@ ## Files: -* `config` +* `config.toml` Cargo's global configuration file, see the [config entry in the reference][config]. -* `credentials` +* `credentials.toml` Private login credentials from [`cargo login`] in order to log in to a registry. * `.crates.toml` diff -Nru cargo-0.44.1/src/doc/src/index.md cargo-0.47.0/src/doc/src/index.md --- cargo-0.44.1/src/doc/src/index.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/index.md 2020-07-17 20:39:39.000000000 +0000 @@ -23,12 +23,22 @@ The reference covers the details of various areas of Cargo. +**[Cargo Commands](commands/index.md)** + +The commands will let you interact with Cargo using its command-line interface. + **[Frequently Asked Questions](faq.md)** -**Appendicies:** +**Appendices:** * [Glossary](appendix/glossary.md) * [Git Authentication](appendix/git-authentication.md) +**Other Documentation:** +* [Changelog](https://github.com/rust-lang/cargo/blob/master/CHANGELOG.md) — + Detailed notes about changes in Cargo in each release. 
+* [Rust documentation website](https://doc.rust-lang.org/) — Links to official + Rust documentation and tools. + [rust]: https://www.rust-lang.org/ [crates.io]: https://crates.io/ [GitHub]: https://github.com/rust-lang/cargo/tree/master/src/doc diff -Nru cargo-0.44.1/src/doc/src/reference/config.md cargo-0.47.0/src/doc/src/reference/config.md --- cargo-0.44.1/src/doc/src/reference/config.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/config.md 2020-07-17 20:39:39.000000000 +0000 @@ -12,14 +12,14 @@ `/projects/foo/bar/baz`, then the following configuration files would be probed for and unified in this order: -* `/projects/foo/bar/baz/.cargo/config` -* `/projects/foo/bar/.cargo/config` -* `/projects/foo/.cargo/config` -* `/projects/.cargo/config` -* `/.cargo/config` -* `$CARGO_HOME/config` which defaults to: - * Windows: `%USERPROFILE%\.cargo\config` - * Unix: `$HOME/.cargo/config` +* `/projects/foo/bar/baz/.cargo/config.toml` +* `/projects/foo/bar/.cargo/config.toml` +* `/projects/foo/.cargo/config.toml` +* `/projects/.cargo/config.toml` +* `/.cargo/config.toml` +* `$CARGO_HOME/config.toml` which defaults to: + * Windows: `%USERPROFILE%\.cargo\config.toml` + * Unix: `$HOME/.cargo/config.toml` With this structure, you can specify configuration per-package, and even possibly check it into version control. You can also specify personal defaults @@ -30,6 +30,11 @@ config directory taking precedence over ancestor directories, where the home directory is the lowest priority. Arrays will be joined together. +> **Note:** Cargo also reads config files without the `.toml` extension, such as +> `.cargo/config`. Support for the `.toml` extension was added in version 1.39 +> and is the preferred form. If both files exist, Cargo will use the file +> without the extension. + ### Configuration format Configuration files are written in the [TOML format][toml] (like the @@ -179,15 +184,15 @@ runner = "foo" # Searches `PATH` for `foo`. [source.vendored-sources] -# Directory is relative to the parent where `.cargo/config` is located. -# For example, `/my/project/.cargo/config` would result in `/my/project/vendor`. +# Directory is relative to the parent where `.cargo/config.toml` is located. +# For example, `/my/project/.cargo/config.toml` would result in `/my/project/vendor`. directory = "vendor" ``` ### Credentials Configuration values with sensitive information are stored in the -`$CARGO_HOME/credentials` file. This file is automatically created and updated +`$CARGO_HOME/credentials.toml` file. This file is automatically created and updated by [`cargo login`]. It follows the same format as Cargo config files. ```toml @@ -681,7 +686,7 @@ ##### `registry.index` -This value is deprecated and should not be used. +This value is no longer accepted and should not be used. ##### `registry.default` * Type: string diff -Nru cargo-0.44.1/src/doc/src/reference/environment-variables.md cargo-0.47.0/src/doc/src/reference/environment-variables.md --- cargo-0.44.1/src/doc/src/reference/environment-variables.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/environment-variables.md 2020-07-17 20:39:39.000000000 +0000 @@ -98,7 +98,7 @@ * `CARGO_REGISTRIES__TOKEN` — Authentication token of a registry, see [`registries..token`]. * `CARGO_REGISTRY_DEFAULT` — Default registry for the `--registry` flag, see [`registry.default`]. * `CARGO_REGISTRY_TOKEN` — Authentication token for [crates.io], see [`registry.token`]. 
-* `CARGO_TARGET__LINKER` — The linker to use, see [`target..linker`]. +* `CARGO_TARGET__LINKER` — The linker to use, see [`target..linker`]. The triple must be [converted to uppercase and underscores](config.md#environment-variables). * `CARGO_TARGET__RUNNER` — The executable runner, see [`target..runner`]. * `CARGO_TARGET__RUSTFLAGS` — Extra `rustc` flags for a target, see [`target..rustflags`]. * `CARGO_TERM_VERBOSE` — The default terminal verbosity, see [`term.verbose`]. @@ -184,6 +184,10 @@ * `CARGO_PKG_DESCRIPTION` — The description from the manifest of your package. * `CARGO_PKG_HOMEPAGE` — The home page from the manifest of your package. * `CARGO_PKG_REPOSITORY` — The repository from the manifest of your package. +* `CARGO_PKG_LICENSE` — The license from the manifest of your package. +* `CARGO_PKG_LICENSE_FILE` — The license file from the manifest of your package. +* `CARGO_CRATE_NAME` — The name of the crate that is currently being compiled. +* `CARGO_BIN_NAME` — The name of the binary that is currently being compiled (if it is a binary). This name does not include any file extension, such as `.exe`. * `OUT_DIR` — If the package has a build script, this is set to the folder where the build script should place its output. See below for more information. (Only set during compilation.) @@ -294,7 +298,7 @@ use it as well. * `RUSTC_LINKER` — The path to the linker binary that Cargo has resolved to use for the current target, if specified. The linker can be - changed by editing `.cargo/config`; see the documentation + changed by editing `.cargo/config.toml`; see the documentation about [cargo configuration][cargo-config] for more information. diff -Nru cargo-0.44.1/src/doc/src/reference/external-tools.md cargo-0.47.0/src/doc/src/reference/external-tools.md --- cargo-0.44.1/src/doc/src/reference/external-tools.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/external-tools.md 2020-07-17 20:39:39.000000000 +0000 @@ -48,7 +48,11 @@ of the `--message-format` option in the [build command documentation] for more details. +If you are using Rust, the [cargo_metadata] crate can be used to parse these +messages. + [build command documentation]: ../commands/cargo-build.md +[cargo_metadata]: https://crates.io/crates/cargo_metadata #### Compiler messages @@ -210,13 +214,37 @@ ["SOME_KEY", "some value"], ["ANOTHER_KEY", "another value"] ], - /* A path which is used as a value of `OUT_DIR` environmental variable - when compiling current package. + /* An absolute path which is used as a value of `OUT_DIR` environmental + variable when compiling current package. */ "out_dir": "/some/path/in/target/dir" } ``` +#### Build finished + +The "build-finished" message is emitted at the end of the build. + +```javascript +{ + /* The "reason" indicates the kind of message. */ + "reason": "build-finished", + /* Whether or not the build finished successfully. */ + "success": true, +} +```` + +This message can be helpful for tools to know when to stop reading JSON +messages. Commands such as `cargo test` or `cargo run` can produce additional +output after the build has finished. This message lets a tool know that Cargo +will not produce additional JSON messages, but there may be additional output +that may be generated afterwards (such as the output generated by the program +executed by `cargo run`). 
+ +> Note: There is experimental nightly-only support for JSON output for tests, +> so additional test-specific JSON messages may begin arriving after the +> "build-finished" message if that is enabled. + ### Custom subcommands Cargo is designed to be extensible with new subcommands without having to modify diff -Nru cargo-0.44.1/src/doc/src/reference/manifest.md cargo-0.47.0/src/doc/src/reference/manifest.md --- cargo-0.44.1/src/doc/src/reference/manifest.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/manifest.md 2020-07-17 20:39:39.000000000 +0000 @@ -152,10 +152,6 @@ documentation = "https://docs.rs/bitflags" ``` -> **Note**: [crates.io] may not show certain sites if they are known to not be -> hosting documentation and are possibly of malicious intent e.g., ad tracking -> networks. At this time, the site `rust-ci.org` is not allowed. - #### The `readme` field The `readme` field should be the path to a file in the package root (relative @@ -169,6 +165,12 @@ readme = "README.md" ``` +If no value is specified for this field, and a file named `README.md`, +`README.txt` or `README` exists in the package root, then the name of that +file will be used. You can suppress this behavior by setting this field to +`false`. If the field is set to `true`, a default value of `README.md` will +be assumed. + #### The `homepage` field The `homepage` field should be a URL to a site that is the home page for your @@ -424,6 +426,15 @@ assets = "path/to/static" ``` +There is a similar table at the workspace level at +[`workspace.metadata`][workspace-metadata]. While cargo does not specify a +format for the content of either of these tables, it is suggested that +external tools may wish to use them in a consistent fashion, such as referring +to the data in `workspace.metadata` if data is missing from `package.metadata`, +if that makes sense for the tool in question. + +[workspace-metadata]: workspaces.md#the-workspacemetadata-table + #### The `default-run` field The `default-run` field in the `[package]` section of the manifest can be used diff -Nru cargo-0.44.1/src/doc/src/reference/overriding-dependencies.md cargo-0.47.0/src/doc/src/reference/overriding-dependencies.md --- cargo-0.44.1/src/doc/src/reference/overriding-dependencies.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/overriding-dependencies.md 2020-07-17 20:39:39.000000000 +0000 @@ -318,8 +318,8 @@ case Cargo offers a much more limited version of overrides called **path overrides**. -Path overrides are specified through [`.cargo/config`](config.md) instead of -`Cargo.toml`. Inside of `.cargo/config` you'll specify a key called `paths`: +Path overrides are specified through [`.cargo/config.toml`](config.md) instead of +`Cargo.toml`. Inside of `.cargo/config.toml` you'll specify a key called `paths`: ```toml paths = ["/path/to/uuid"] diff -Nru cargo-0.44.1/src/doc/src/reference/profiles.md cargo-0.47.0/src/doc/src/reference/profiles.md --- cargo-0.44.1/src/doc/src/reference/profiles.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/profiles.md 2020-07-17 20:39:39.000000000 +0000 @@ -186,8 +186,8 @@ This option takes an integer greater than 0. -This option is ignored if [incremental](#incremental) is enabled, in which -case `rustc` uses an internal heuristic to split the crate. +The default is 256 for [incremental](#incremental) builds, and 16 for +non-incremental builds. 
[`-C codegen-units` flag]: ../../rustc/codegen-options/index.html#codegen-units @@ -217,7 +217,7 @@ lto = false panic = 'unwind' incremental = true -codegen-units = 16 # Note: ignored because `incremental` is enabled. +codegen-units = 256 rpath = false ``` @@ -258,7 +258,7 @@ lto = false panic = 'unwind' # This setting is always ignored. incremental = true -codegen-units = 16 # Note: ignored because `incremental` is enabled. +codegen-units = 256 rpath = false ``` diff -Nru cargo-0.44.1/src/doc/src/reference/publishing.md cargo-0.47.0/src/doc/src/reference/publishing.md --- cargo-0.44.1/src/doc/src/reference/publishing.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/publishing.md 2020-07-17 20:39:39.000000000 +0000 @@ -21,7 +21,7 @@ ``` This command will inform Cargo of your API token and store it locally in your -`~/.cargo/credentials`. Note that this token is a **secret** and should not be +`~/.cargo/credentials.toml`. Note that this token is a **secret** and should not be shared with anyone else. If it leaks for any reason, you should regenerate it immediately. diff -Nru cargo-0.44.1/src/doc/src/reference/registries.md cargo-0.47.0/src/doc/src/reference/registries.md --- cargo-0.44.1/src/doc/src/reference/registries.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/registries.md 2020-07-17 20:39:39.000000000 +0000 @@ -11,7 +11,7 @@ ### Using an Alternate Registry To use a registry other than [crates.io], the name and index URL of the -registry must be added to a [`.cargo/config` file][config]. The `registries` +registry must be added to a [`.cargo/config.toml` file][config]. The `registries` table has a key for each registry, for example: ```toml @@ -63,7 +63,7 @@ 2. `cargo publish --registry=my-registry` Instead of always passing the `--registry` command-line option, the default -registry may be set in [`.cargo/config`][config] with the `registry.default` +registry may be set in [`.cargo/config.toml`][config] with the `registry.default` key. Setting the `package.publish` key in the `Cargo.toml` manifest restricts which @@ -81,7 +81,7 @@ the same as an empty list. The authentication information saved by [`cargo login`] is stored in the -`credentials` file in the Cargo home directory (default `$HOME/.cargo`). It +`credentials.toml` file in the Cargo home directory (default `$HOME/.cargo`). It has a separate table for each registry, for example: ```toml @@ -127,9 +127,17 @@ The keys are: - `dl`: This is the URL for downloading crates listed in the index. The value - may have the markers `{crate}` and `{version}` which are replaced with the - name and version of the crate to download. If the markers are not present, - then the value `/{crate}/{version}/download` is appended to the end. + may have the following markers which will be replaced with their + corresponding value: + + - `{crate}`: The name of crate. + - `{version}`: The crate version. + - `{prefix}`: A directory prefix computed from the crate name. For example, + a crate named `cargo` has a prefix of `ca/rg`. See below for details. + - `{lowerprefix}`: Lowercase variant of `{prefix}`. + + If none of the markers are present, then the value + `/{crate}/{version}/download` is appended to the end. - `api`: This is the base URL for the web API. This key is optional, but if it is not specified, commands such as [`cargo publish`] will not work. The web API is described below. 
@@ -159,6 +167,21 @@ > package names in `Cargo.toml` and the index JSON data are case-sensitive and > may contain upper and lower case characters. +The directory name above is calculated based on the package name converted to +lowercase; it is represented by the marker `{lowerprefix}`. When the original +package name is used without case conversion, the resulting directory name is +represented by the marker `{prefix}`. For example, the package `MyCrate` would +have a `{prefix}` of `My/Cr` and a `{lowerprefix}` of `my/cr`. In general, +using `{prefix}` is recommended over `{lowerprefix}`, but there are pros and +cons to each choice. Using `{prefix}` on case-insensitive filesystems results +in (harmless-but-inelegant) directory aliasing. For example, `crate` and +`CrateTwo` have `{prefix}` values of `cr/at` and `Cr/at`; these are distinct on +Unix machines but alias to the same directory on Windows. Using directories +with normalized case avoids aliasing, but on case-sensitive filesystems it's +harder to suport older versions of Cargo that lack `{prefix}`/`{lowerprefix}`. +For example, nginx rewrite rules can easily construct `{prefix}` but can't +perform case-conversion to construct `{lowerprefix}`. + Registries should consider enforcing limitations on package names added to their index. Cargo itself allows names with any [alphanumeric], `-`, or `_` characters. [crates.io] imposes its own limitations, including the following: @@ -403,7 +426,7 @@ }, // The `links` string value from the package's manifest, or null if not // specified. This field is optional and defaults to null. - "links": null, + "links": null } ``` diff -Nru cargo-0.44.1/src/doc/src/reference/source-replacement.md cargo-0.47.0/src/doc/src/reference/source-replacement.md --- cargo-0.44.1/src/doc/src/reference/source-replacement.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/source-replacement.md 2020-07-17 20:39:39.000000000 +0000 @@ -31,7 +31,7 @@ ### Configuration -Configuration of replacement sources is done through [`.cargo/config`][config] +Configuration of replacement sources is done through [`.cargo/config.toml`][config] and the full set of available keys are: ```toml @@ -42,7 +42,7 @@ # Under the `source` table are a number of other tables whose keys are a # name for the relevant source. For example this section defines a new # source, called `my-vendor-source`, which comes from a directory -# located at `vendor` relative to the directory containing this `.cargo/config` +# located at `vendor` relative to the directory containing this `.cargo/config.toml` # file [source.my-vendor-source] directory = "vendor" diff -Nru cargo-0.44.1/src/doc/src/reference/specifying-dependencies.md cargo-0.47.0/src/doc/src/reference/specifying-dependencies.md --- cargo-0.44.1/src/doc/src/reference/specifying-dependencies.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/specifying-dependencies.md 2020-07-17 20:39:39.000000000 +0000 @@ -107,7 +107,7 @@ ### Specifying dependencies from other registries To specify a dependency from a registry other than [crates.io], first the -registry must be configured in a `.cargo/config` file. See the [registries +registry must be configured in a `.cargo/config.toml` file. See the [registries documentation] for more information. In the dependency, set the `registry` key to the name of the registry to use. 
diff -Nru cargo-0.44.1/src/doc/src/reference/unstable.md cargo-0.47.0/src/doc/src/reference/unstable.md --- cargo-0.44.1/src/doc/src/reference/unstable.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/unstable.md 2020-07-17 20:39:39.000000000 +0000 @@ -26,7 +26,7 @@ The `-Z mtime-on-use` flag is an experiment to have Cargo update the mtime of used files to make it easier for tools like cargo-sweep to detect which files are stale. For many workflows this needs to be set on *all* invocations of cargo. -To make this more practical setting the `unstable.mtime_on_use` flag in `.cargo/config` +To make this more practical setting the `unstable.mtime_on_use` flag in `.cargo/config.toml` or the corresponding ENV variable will apply the `-Z mtime-on-use` to all invocations of nightly cargo. (the config flag is ignored by stable) @@ -76,7 +76,7 @@ cargo +nightly build --out-dir=out -Z unstable-options ``` -This can also be specified in `.cargo/config` files. +This can also be specified in `.cargo/config.toml` files. ```toml [build] @@ -93,12 +93,31 @@ present, cargo will continue as normal, passing the tests to doctest, while also passing it a `--target` option, as well as enabling `-Zunstable-features --enable-per-target-ignores` and passing along -information from `.cargo/config`. See the rustc issue for more information. +information from `.cargo/config.toml`. See the rustc issue for more information. ``` cargo test --target foo -Zdoctest-xcompile ``` +### multitarget +* Tracking Issue: [#8176](https://github.com/rust-lang/cargo/issues/8176) + +This flag allows passing multiple `--target` flags to the `cargo` subcommand +selected. When multiple `--target` flags are passed the selected build targets +will be built for each of the selected architectures. + +For example to compile a library for both 32 and 64-bit: + +``` +cargo build --target x86_64-unknown-linux-gnu --target i686-unknown-linux-gnu +``` + +or running tests for both targets: + +``` +cargo test --target x86_64-unknown-linux-gnu --target i686-unknown-linux-gnu +``` + ### Custom named profiles * Tracking Issue: [rust-lang/cargo#6988](https://github.com/rust-lang/cargo/issues/6988) @@ -474,7 +493,7 @@ CLI paths are relative to the current working directory. -## Features +### Features * Tracking Issues: * [itarget #7914](https://github.com/rust-lang/cargo/issues/7914) * [build_dep #7915](https://github.com/rust-lang/cargo/issues/7915) @@ -507,8 +526,8 @@ When building this example for a non-Windows platform, the `f2` feature will *not* be enabled. -* `build_dep` — Prevents features enabled on build dependencies from being - enabled for normal dependencies. For example: +* `host_dep` — Prevents features enabled on build dependencies or proc-macros + from being enabled for normal dependencies. For example: ```toml [dependencies] @@ -522,6 +541,9 @@ feature. When building the library of your package, it will not enable the feature. + Note that proc-macro decoupling requires changes to the registry, so it + won't be decoupled until the registry is updated to support the new field. + * `dev_dep` — Prevents features enabled on dev dependencies from being enabled for normal dependencies. For example: @@ -546,9 +568,261 @@ * `compare` — This option compares the resolved features to the old resolver, and will print any differences. 
+### package-features +* Tracking Issue: [#5364](https://github.com/rust-lang/cargo/issues/5364) + +The `-Zpackage-features` flag changes the way features can be passed on the +command-line for a workspace. The normal behavior can be confusing, as the +features passed are always enabled on the package in the current directory, +even if that package is not selected with a `-p` flag. Feature flags also do +not work in the root of a virtual workspace. `-Zpackage-features` tries to +make feature flags behave in a more intuitive manner. + +* `cargo build -p other_member --features …` — This now only enables the given + features as defined in `other_member` (ignores whatever is in the current + directory). +* `cargo build -p a -p b --features …` — This now enables the given features + on both `a` and `b`. Not all packages need to define every feature, it only + enables matching features. It is still an error if none of the packages + define a given feature. +* `--features` and `--no-default-features` are now allowed in the root of a + virtual workspace. +* `member_name/feature_name` syntax may now be used on the command-line to + enable features for a specific member. + +The ability to set features for non-workspace members is no longer allowed, as +the resolver fundamentally does not support that ability. + +### Resolver +* Tracking Issue: [#8088](https://github.com/rust-lang/cargo/issues/8088) + +The `resolver` feature allows the resolver version to be specified in the +`Cargo.toml` manifest. This allows a project to opt-in to +backwards-incompatible changes in the resolver. + +```toml +cargo-features = ["resolver"] + +[package] +name = "my-package" +version = "1.0.0" +resolver = "2" +``` + +Currently the only allowed value is `"2"`. This declaration enables all of the +new feature behavior of [`-Zfeatures=all`](#features) and +[`-Zpackage-features`](#package-features). + +This flag is global for a workspace. If using a virtual workspace, the root +definition should be in the `[workspace]` table like this: + +```toml +cargo-features = ["resolver"] + +[workspace] +members = ["member1", "member2"] +resolver = "2" +``` + +The `resolver` field is ignored in dependencies, only the top-level project or +workspace can control the new behavior. + ### crate-versions * Tracking Issue: [#7907](https://github.com/rust-lang/cargo/issues/7907) The `-Z crate-versions` flag will make `cargo doc` include appropriate crate versions for the current crate and all of its dependencies (unless `--no-deps` was provided) in the compiled documentation. You can find an example screenshot for the cargo itself in the tracking issue. + +### unit-graph +* Tracking Issue: [#8002](https://github.com/rust-lang/cargo/issues/8002) + +The `--unit-graph` flag can be passed to any build command (`build`, `check`, +`run`, `test`, `bench`, `doc`, etc.) to emit a JSON object to stdout which +represents Cargo's internal unit graph. Nothing is actually built, and the +command returns immediately after printing. Each "unit" corresponds to an +execution of the compiler. These objects also include which unit each unit +depends on. + +``` +cargo +nightly build --unit-graph -Z unstable-options +``` + +This structure provides a more complete view of the dependency relationship as +Cargo sees it. In particular, the "features" field supports the new feature +resolver where a dependency can be built multiple times with different +features. 
`cargo metadata` fundamentally cannot represent the relationship of +features between different dependency kinds, and features now depend on which +command is run and which packages and targets are selected. Additionally it +can provide details about intra-package dependencies like build scripts or +tests. + +The following is a description of the JSON structure: + +```javascript +{ + /* Version of the JSON output structure. If any backwards incompatible + changes are made, this value will be increased. + */ + "version": 1, + /* Array of all build units. */ + "units": [ + { + /* An opaque string which indicates the package. + Information about the package can be obtained from `cargo metadata`. + */ + "pkg_id": "my-package 0.1.0 (path+file:///path/to/my-package)", + /* The Cargo target. See the `cargo metadata` documentation for more + information about these fields. + https://doc.rust-lang.org/cargo/commands/cargo-metadata.html + */ + "target": { + "kind": ["lib"], + "crate_types": ["lib"], + "name": "my-package", + "src_path": "/path/to/my-package/src/lib.rs", + "edition": "2018", + "doctest": true + }, + /* The profile settings for this unit. + These values may not match the profile defined in the manifest. + Units can use modified profile settings. For example, the "panic" + setting can be overridden for tests to force it to "unwind". + */ + "profile": { + /* The profile name these settings are derived from. */ + "name": "dev", + /* The optimization level as a string. */ + "opt_level": "0", + /* The LTO setting as a string. */ + "lto": "false", + /* The codegen units as an integer. + `null` if it should use the compiler's default. + */ + "codegen_units": null, + /* The debug information level as an integer. + `null` if it should use the compiler's default (0). + */ + "debuginfo": 2, + /* Whether or not debug-assertions are enabled. */ + "debug_assertions": true, + /* Whether or not overflow-checks are enabled. */ + "overflow_checks": true, + /* Whether or not rpath is enabled. */ + "rpath": false, + /* Whether or not incremental is enabled. */ + "incremental": true, + /* The panic strategy, "unwind" or "abort". */ + "panic": "unwind" + }, + /* Which platform this target is being built for. + A value of `null` indicates it is for the host. + Otherwise it is a string of the target triple (such as + "x86_64-unknown-linux-gnu"). + */ + "platform": null, + /* The "mode" for this unit. Valid values: + + * "test" — Build using `rustc` as a test. + * "build" — Build using `rustc`. + * "check" — Build using `rustc` in "check" mode. + * "doc" — Build using `rustdoc`. + * "doctest" — Test using `rustdoc`. + * "run-custom-build" — Represents the execution of a build script. + */ + "mode": "build", + /* Array of features enabled on this unit as strings. */ + "features": ["somefeat"], + /* Whether or not this is a standard-library unit, + part of the unstable build-std feature. + If not set, treat as `false`. + */ + "is_std": false, + /* Array of dependencies of this unit. */ + "dependencies": [ + { + /* Index in the "units" array for the dependency. */ + "index": 1, + /* The name that this dependency will be referred as. */ + "extern_crate_name": "unicode_xid", + /* Whether or not this dependency is "public", + part of the unstable public-dependency feature. + If not set, the public-dependency feature is not enabled. + */ + "public": false, + /* Whether or not this dependency is injected into the prelude, + currently used by the build-std feature. + If not set, treat as `false`. 
+ */ + "noprelude": false + } + ] + }, + // ... + ], + /* Array of indices in the "units" array that are the "roots" of the + dependency graph. + */ + "roots": [0], +} +``` + +### Profile `strip` option +* Tracking Issue: [rust-lang/rust#72110](https://github.com/rust-lang/rust/issues/72110) + +This feature provides a new option in the `[profile]` section to strip either +symbols or debuginfo from a binary. This can be enabled like so: + +```toml +cargo-features = ["strip"] + +[package] +# ... + +[profile.release] +strip = "debuginfo" +``` + +Other possible values of `strip` are `none` and `symbols`. The default is +`none`. + +### rustdoc-map +* Tracking Issue: [#8296](https://github.com/rust-lang/cargo/issues/8296) + +This feature adds configuration settings that are passed to `rustdoc` so that +it can generate links to dependencies whose documentation is hosted elsewhere +when the dependency is not documented. First, add this to `.cargo/config`: + +```toml +[doc.extern-map.registries] +crates-io = "https://docs.rs/" +``` + +Then, when building documentation, use the following flags to cause links +to dependencies to link to [docs.rs](https://docs.rs/): + +``` +cargo +nightly doc --no-deps -Zrustdoc-map +``` + +The `registries` table contains a mapping of registry name to the URL to link +to. The URL may have the markers `{pkg_name}` and `{version}` which will get +replaced with the corresponding values. If neither are specified, then Cargo +defaults to appending `{pkg_name}/{version}/` to the end of the URL. + +Another config setting is available to redirect standard library links. By +default, rustdoc creates links to . To +change this behavior, use the `doc.extern-map.std` setting: + +```toml +[doc.extern-map] +std = "local" +``` + +A value of `"local"` means to link to the documentation found in the `rustc` +sysroot. If you are using rustup, this documentation can be installed with +`rustup component add rust-docs`. + +The default value is `"remote"`. + +The value may also take a URL for a custom location. diff -Nru cargo-0.44.1/src/doc/src/reference/workspaces.md cargo-0.47.0/src/doc/src/reference/workspaces.md --- cargo-0.44.1/src/doc/src/reference/workspaces.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/reference/workspaces.md 2020-07-17 20:39:39.000000000 +0000 @@ -82,7 +82,31 @@ When specified, `default-members` must expand to a subset of `members`. +### The `workspace.metadata` table + +The `workspace.metadata` table is ignored by Cargo and will not be warned +about. This section can be used for tools that would like to store workspace +configuration in `Cargo.toml`. For example: + +```toml +[workspace] +members = ["member1", "member2"] + +[workspace.metadata.webcontents] +root = "path/to/webproject" +tool = ["npm", "run", "build"] +# ... +``` + +There is a similar set of tables at the package level at +[`package.metadata`][package-metadata]. While cargo does not specify a +format for the content of either of these tables, it is suggested that +external tools may wish to use them in a consistent fashion, such as referring +to the data in `workspace.metadata` if data is missing from `package.metadata`, +if that makes sense for the tool in question. 
+ [package]: manifest.md#the-package-section +[package-metadata]: manifest.md#the-metadata-table [output directory]: ../guide/build-cache.md [patch]: overriding-dependencies.md#the-patch-section [replace]: overriding-dependencies.md#the-replace-section diff -Nru cargo-0.44.1/src/doc/src/SUMMARY.md cargo-0.47.0/src/doc/src/SUMMARY.md --- cargo-0.44.1/src/doc/src/SUMMARY.md 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/doc/src/SUMMARY.md 2020-07-17 20:39:39.000000000 +0000 @@ -59,6 +59,7 @@ * [cargo locate-project](commands/cargo-locate-project.md) * [cargo metadata](commands/cargo-metadata.md) * [cargo pkgid](commands/cargo-pkgid.md) + * [cargo tree](commands/cargo-tree.md) * [cargo update](commands/cargo-update.md) * [cargo vendor](commands/cargo-vendor.md) * [cargo verify-project](commands/cargo-verify-project.md) diff -Nru cargo-0.44.1/src/etc/_cargo cargo-0.47.0/src/etc/_cargo --- cargo-0.44.1/src/etc/_cargo 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/_cargo 2020-07-17 20:39:39.000000000 +0000 @@ -129,12 +129,6 @@ _arguments -s -S $common $manifest ;; - git-checkout) - _arguments -s -S $common \ - '--reference=:reference' \ - '--url=:url:_urls' - ;; - help) _cargo_cmds ;; @@ -277,6 +271,18 @@ '*: :_default' ;; + tree) + _arguments -s -S $common $features $triple $manifest \ + '(-p --package)'{-p+,--package=}'[package to use as the root]:package:_cargo_package_names' \ + '(-i --invert)'{-i+,--invert=}'[invert the tree for the given package]:package:_cargo_package_names' \ + '--prefix=[line prefix]:prefix:(depth indent none)' \ + '--no-dedupe[repeat shared dependencies]' \ + '(-d --duplicates)'{-d,--duplicates}'[packages with multiple versions]' \ + '--charset=[utf8 or ascii]:charset:(utf8 ascii)' \ + '(-f --format)'{-f,--format=}'[format string]:format' \ + '(-e --edges)'{-e,--edges=}'[edge kinds]:kind:(features normal build dev all no-dev no-build no-normal)' \ + ;; + uninstall) _arguments -s -S $common \ '(-p --package)'{-p+,--package=}'[specify package to uninstall]:package:_cargo_package_names' \ diff -Nru cargo-0.44.1/src/etc/cargo.bashcomp.sh cargo-0.47.0/src/etc/cargo.bashcomp.sh --- cargo-0.44.1/src/etc/cargo.bashcomp.sh 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/cargo.bashcomp.sh 2020-07-17 20:39:39.000000000 +0000 @@ -56,7 +56,6 @@ local opt__fetch="$opt_common $opt_mani $opt_lock --target" local opt__fix="$opt_common $opt_pkg_spec $opt_feat $opt_mani $opt_jobs $opt_targets $opt_lock --release --target --message-format --broken-code --edition --edition-idioms --allow-no-vcs --allow-dirty --allow-staged --profile --target-dir" local opt__generate_lockfile="$opt_common $opt_mani $opt_lock" - local opt__git_checkout="$opt_common $opt_lock --reference --url" local opt__help="$opt_help" local opt__init="$opt_common $opt_lock --bin --lib --name --vcs --edition --registry" local opt__install="$opt_common $opt_feat $opt_jobs $opt_lock $opt_force --bin --bins --branch --debug --example --examples --git --list --path --rev --root --tag --version --registry --target --profile --no-track" @@ -74,6 +73,7 @@ local opt__rustdoc="$opt_common $opt_pkg $opt_feat $opt_mani $opt_lock $opt_jobs $opt_targets --message-format --target --release --open --target-dir --profile" local opt__search="$opt_common $opt_lock --limit --index --registry" local opt__test="$opt_common $opt_pkg_spec $opt_feat $opt_mani $opt_lock $opt_jobs $opt_targets --message-format --doc --target --no-run --release --no-fail-fast --target-dir --profile" + local opt__tree="$opt_common 
$opt_pkg_spec $opt_feat $opt_mani $opt_lock --target -i --invert --prefix --no-dedupe --duplicates -d --charset -f --format -e --edges" local opt__uninstall="$opt_common $opt_lock $opt_pkg --bin --root" local opt__update="$opt_common $opt_mani $opt_lock $opt_pkg --aggressive --precise --dry-run" local opt__vendor="$opt_common $opt_mani $opt_lock $opt_sync --no-delete --respect-source-config --versioned-dirs" diff -Nru cargo-0.44.1/src/etc/man/cargo.1 cargo-0.47.0/src/etc/man/cargo.1 --- cargo-0.44.1/src/etc/man/cargo.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-12-04 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO" "1" "2019-12-04" "\ \&" "\ \&" +.TH "CARGO" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -125,6 +125,11 @@ Print a fully qualified package specification. .RE .sp +\fBcargo\-tree\fP(1) +.RS 4 +Display a tree visualization of a dependency graph. +.RE +.sp \fBcargo\-update\fP(1) .RS 4 Update dependencies as recorded in the local lock file. @@ -310,6 +315,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. @@ -352,21 +367,21 @@ rustup, executables distributed with Rust are also located here. .RE .sp -\fB$CARGO_HOME/config\fP +\fB$CARGO_HOME/config.toml\fP .RS 4 The global configuration file. See \c .URL "https://doc.rust\-lang.org/cargo/reference/config.html" "the reference" for more information about configuration files. .RE .sp -\fB.cargo/config\fP +\fB.cargo/config.toml\fP .RS 4 -Cargo automatically searches for a file named \fB.cargo/config\fP in the +Cargo automatically searches for a file named \fB.cargo/config.toml\fP in the current directory, and all parent directories. These configuration files will be merged with the global configuration file. .RE .sp -\fB$CARGO_HOME/credentials\fP +\fB$CARGO_HOME/credentials.toml\fP .RS 4 Private authentication information for logging in to a registry. .RE diff -Nru cargo-0.44.1/src/etc/man/cargo-bench.1 cargo-0.47.0/src/etc/man/cargo-bench.1 --- cargo-0.44.1/src/etc/man/cargo-bench.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-bench.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-bench .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-BENCH" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-BENCH" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -59,6 +59,22 @@ The libtest harness may be disabled by setting \fBharness = false\fP in the target manifest settings, in which case your code will need to provide its own \fBmain\fP function to handle running benchmarks. 
+.RS 3 +.ll -.6i +.sp +\fBNote\fP: The +\c +.URL "https://doc.rust\-lang.org/nightly/unstable\-book/library\-features/test.html" "\fB#[bench]\fP attribute" +is currently unstable and only available on the +.URL "https://doc.rust\-lang.org/book/appendix\-07\-nightly\-rust.html" "nightly channel" "." +There are some packages available on +.URL "https://crates.io/keywords/benchmark" "crates.io" " " +that may help with +running benchmarks on the stable channel, such as +.URL "https://crates.io/crates/criterion" "Criterion" "." +.br +.RE +.ll .SH "OPTIONS" .SS "Benchmark Options" .sp @@ -484,6 +500,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-build.1 cargo-0.47.0/src/etc/man/cargo-build.1 --- cargo-0.44.1/src/etc/man/cargo-build.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-build.1 2020-07-17 20:39:39.000000000 +0000 @@ -407,6 +407,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-check.1 cargo-0.47.0/src/etc/man/cargo-check.1 --- cargo-0.44.1/src/etc/man/cargo-check.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-check.1 2020-07-17 20:39:39.000000000 +0000 @@ -393,6 +393,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-clean.1 cargo-0.47.0/src/etc/man/cargo-clean.1 --- cargo-0.44.1/src/etc/man/cargo-clean.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-clean.1 2020-07-17 20:39:39.000000000 +0000 @@ -182,6 +182,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. 
diff -Nru cargo-0.44.1/src/etc/man/cargo-doc.1 cargo-0.47.0/src/etc/man/cargo-doc.1 --- cargo-0.44.1/src/etc/man/cargo-doc.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-doc.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-doc .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-04-21 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-DOC" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-DOC" "1" "2020-04-21" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -352,6 +352,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-fetch.1 cargo-0.47.0/src/etc/man/cargo-fetch.1 --- cargo-0.44.1/src/etc/man/cargo-fetch.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-fetch.1 2020-07-17 20:39:39.000000000 +0000 @@ -160,6 +160,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-fix.1 cargo-0.47.0/src/etc/man/cargo-fix.1 --- cargo-0.44.1/src/etc/man/cargo-fix.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-fix.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-fix .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-FIX" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-FIX" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -467,6 +467,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-generate-lockfile.1 cargo-0.47.0/src/etc/man/cargo-generate-lockfile.1 --- cargo-0.44.1/src/etc/man/cargo-generate-lockfile.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-generate-lockfile.1 2020-07-17 20:39:39.000000000 +0000 @@ -135,6 +135,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). 
+See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-help.1 cargo-0.47.0/src/etc/man/cargo-help.1 --- cargo-0.44.1/src/etc/man/cargo-help.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-help.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-help .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-HELP" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-HELP" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 diff -Nru cargo-0.44.1/src/etc/man/cargo-init.1 cargo-0.47.0/src/etc/man/cargo-init.1 --- cargo-0.44.1/src/etc/man/cargo-init.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-init.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-init .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-INIT" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-INIT" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -314,6 +314,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-install.1 cargo-0.47.0/src/etc/man/cargo-install.1 --- cargo-0.44.1/src/etc/man/cargo-install.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-install.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-install .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-07-01 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-INSTALL" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-INSTALL" "1" "2020-07-01" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -293,6 +293,11 @@ If not specified, the default registry is used, which is defined by the \fBregistry.default\fP config key which defaults to \fBcrates\-io\fP. .RE +.sp +\fB\-\-index\fP \fIINDEX\fP +.RS 4 +The URL of the registry index to use. +.RE .SS "Feature Selection" .sp The feature flags allow you to control the enabled features for the "current" @@ -340,6 +345,16 @@ documentation for more details. .RE .sp +\fB\-\-target\-dir\fP \fIDIRECTORY\fP +.RS 4 +Directory for all generated artifacts and intermediate files. May also be +specified with the \fBCARGO_TARGET_DIR\fP environment variable, or the +\fBbuild.target\-dir\fP \c +.URL "https://doc.rust\-lang.org/cargo/reference/config.html" "config value" "." +Defaults +to \fBtarget\fP in the root of the workspace. +.RE +.sp \fB\-\-debug\fP .RS 4 Build with the \fBdev\fP profile instead the \fBrelease\fP profile. 
@@ -442,6 +457,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-locate-project.1 cargo-0.47.0/src/etc/man/cargo-locate-project.1 --- cargo-0.44.1/src/etc/man/cargo-locate-project.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-locate-project.1 2020-07-17 20:39:39.000000000 +0000 @@ -105,6 +105,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-login.1 cargo-0.47.0/src/etc/man/cargo-login.1 --- cargo-0.44.1/src/etc/man/cargo-login.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-login.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-login .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-LOGIN" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-LOGIN" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -36,7 +36,7 @@ .sp This command will save the API token to disk so that commands that require authentication, such as \fBcargo\-publish\fP(1), will be automatically -authenticated. The token is saved in \fB$CARGO_HOME/credentials\fP. \fBCARGO_HOME\fP +authenticated. The token is saved in \fB$CARGO_HOME/credentials.toml\fP. \fBCARGO_HOME\fP defaults to \fB.cargo\fP in your home directory. .sp If the \fITOKEN\fP argument is not specified, it will be read from stdin. @@ -113,6 +113,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-metadata.1 cargo-0.47.0/src/etc/man/cargo-metadata.1 --- cargo-0.44.1/src/etc/man/cargo-metadata.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-metadata.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-metadata .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-24 +.\" Date: 2020-07-04 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-METADATA" "1" "2020-02-24" "\ \&" "\ \&" +.TH "CARGO\-METADATA" "1" "2020-07-04" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -289,6 +289,15 @@ "version": 1, /* The absolute path to the root of the workspace. 
*/ "workspace_root": "/path/to/my\-package" + /* Workspace metadata. + This is null if no metadata is specified. */ + "metadata": { + "docs": { + "rs": { + "all\-features": true + } + } + } } .fi .if n .RE @@ -439,6 +448,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. @@ -486,4 +505,4 @@ .RE .SH "SEE ALSO" .sp -\fBcargo\fP(1) \ No newline at end of file +\fBcargo\fP(1) diff -Nru cargo-0.44.1/src/etc/man/cargo-new.1 cargo-0.47.0/src/etc/man/cargo-new.1 --- cargo-0.44.1/src/etc/man/cargo-new.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-new.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-new .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-NEW" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-NEW" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -309,6 +309,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-owner.1 cargo-0.47.0/src/etc/man/cargo-owner.1 --- cargo-0.44.1/src/etc/man/cargo-owner.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-owner.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-owner .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-OWNER" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-OWNER" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -154,6 +154,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. 
diff -Nru cargo-0.44.1/src/etc/man/cargo-package.1 cargo-0.47.0/src/etc/man/cargo-package.1 --- cargo-0.44.1/src/etc/man/cargo-package.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-package.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-package .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-PACKAGE" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-PACKAGE" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -348,6 +348,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-pkgid.1 cargo-0.47.0/src/etc/man/cargo-pkgid.1 --- cargo-0.44.1/src/etc/man/cargo-pkgid.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-pkgid.1 2020-07-17 20:39:39.000000000 +0000 @@ -212,6 +212,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-publish.1 cargo-0.47.0/src/etc/man/cargo-publish.1 --- cargo-0.44.1/src/etc/man/cargo-publish.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-publish.1 2020-07-17 20:39:39.000000000 +0000 @@ -298,6 +298,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-run.1 cargo-0.47.0/src/etc/man/cargo-run.1 --- cargo-0.44.1/src/etc/man/cargo-run.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-run.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-run .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-11-11 +.\" Date: 2020-04-21 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-RUN" "1" "2019-11-11" "\ \&" "\ \&" +.TH "CARGO\-RUN" "1" "2020-04-21" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -304,6 +304,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). 
+See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-rustc.1 cargo-0.47.0/src/etc/man/cargo-rustc.1 --- cargo-0.44.1/src/etc/man/cargo-rustc.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-rustc.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-rustc .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-RUSTC" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-RUSTC" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -375,6 +375,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-rustdoc.1 cargo-0.47.0/src/etc/man/cargo-rustdoc.1 --- cargo-0.44.1/src/etc/man/cargo-rustdoc.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-rustdoc.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-rustdoc .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-06 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-RUSTDOC" "1" "2020-02-06" "\ \&" "\ \&" +.TH "CARGO\-RUSTDOC" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -385,6 +385,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-search.1 cargo-0.47.0/src/etc/man/cargo-search.1 --- cargo-0.44.1/src/etc/man/cargo-search.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-search.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-search .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-SEARCH" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-SEARCH" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -117,6 +117,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. 
diff -Nru cargo-0.44.1/src/etc/man/cargo-test.1 cargo-0.47.0/src/etc/man/cargo-test.1 --- cargo-0.44.1/src/etc/man/cargo-test.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-test.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-test .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2020-02-19 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-TEST" "1" "2020-02-19" "\ \&" "\ \&" +.TH "CARGO\-TEST" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -536,6 +536,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-tree.1 cargo-0.47.0/src/etc/man/cargo-tree.1 --- cargo-0.44.1/src/etc/man/cargo-tree.1 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-tree.1 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,638 @@ +'\" t +.\" Title: cargo-tree +.\" Author: [see the "AUTHOR(S)" section] +.\" Generator: Asciidoctor 2.0.10 +.\" Date: 2020-06-25 +.\" Manual: \ \& +.\" Source: \ \& +.\" Language: English +.\" +.TH "CARGO\-TREE" "1" "2020-06-25" "\ \&" "\ \&" +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.ss \n[.ss] 0 +.nh +.ad l +.de URL +\fI\\$2\fP <\\$1>\\$3 +.. +.als MTO URL +.if \n[.g] \{\ +. mso www.tmac +. am URL +. ad l +. . +. am MTO +. ad l +. . +. LINKSTYLE blue R < > +.\} +.SH "NAME" +cargo\-tree \- Display a tree visualization of a dependency graph +.SH "SYNOPSIS" +.sp +\fBcargo tree [\fIOPTIONS\fP]\fP +.SH "DESCRIPTION" +.sp +This command will display a tree of dependencies to the terminal. An example +of a simple project that depends on the "rand" package: +.sp +.if n .RS 4 +.nf +myproject v0.1.0 (/myproject) +`\-\- rand v0.7.3 + |\-\- getrandom v0.1.14 + | |\-\- cfg\-if v0.1.10 + | `\-\- libc v0.2.68 + |\-\- libc v0.2.68 (*) + |\-\- rand_chacha v0.2.2 + | |\-\- ppv\-lite86 v0.2.6 + | `\-\- rand_core v0.5.1 + | `\-\- getrandom v0.1.14 (*) + `\-\- rand_core v0.5.1 (*) +[build\-dependencies] +`\-\- cc v1.0.50 +.fi +.if n .RE +.sp +Packages marked with \fB(*)\fP have been "de\-duplicated". The dependencies for the +package have already been shown elsewhere in the graph, and so are not +repeated. Use the \fB\-\-no\-dedupe\fP option to repeat the duplicates. +.sp +The \fB\-e\fP flag can be used to select the dependency kinds to display. The +"features" kind changes the output to display the features enabled by +each dependency. For example, \fBcargo tree \-e features\fP: +.sp +.if n .RS 4 +.nf +myproject v0.1.0 (/myproject) +`\-\- log feature "serde" + `\-\- log v0.4.8 + |\-\- serde v1.0.106 + `\-\- cfg\-if feature "default" + `\-\- cfg\-if v0.1.10 +.fi +.if n .RE +.sp +In this tree, \fBmyproject\fP depends on \fBlog\fP with the \fBserde\fP feature. \fBlog\fP in +turn depends on \fBcfg\-if\fP with "default" features. When using \fB\-e features\fP it +can be helpful to use the \fB\-i\fP flag to show how the features flow into a package. +See the examples below for more detail.
+.SH "OPTIONS" +.SS "Tree Options" +.sp +\fB\-i\fP \fISPEC\fP, \fB\-\-invert\fP \fISPEC\fP +.RS 4 +Show the reverse dependencies for the given package. This flag will invert +the tree and display the packages that depend on the given package. +.sp +Note that in a workspace, by default it will only display the package\(cqs +reverse dependencies inside the tree of the workspace member in the current +directory. The \fB\-\-workspace\fP flag can be used to extend it so that it will +show the package\(cqs reverse dependencies across the entire workspace. The \fB\-p\fP +flag can be used to display the package\(cqs reverse dependencies only with the +subtree of the package given to \fB\-p\fP. +.RE +.sp +\fB\-\-no\-dedupe\fP +.RS 4 +Do not de\-duplicate repeated dependencies. Usually, when a package has +already displayed its dependencies, further occurrences will not +re\-display its dependencies, and will include a \fB(*)\fP to indicate it has +already been shown. This flag will cause those duplicates to be repeated. +.RE +.sp +\fB\-d\fP, \fB\-\-duplicates\fP +.RS 4 +Show only dependencies which come in multiple versions (implies +\fB\-\-invert\fP). When used with the \fB\-p\fP flag, only shows duplicates within +the subtree of the given package. +.sp +It can be beneficial for build times and executable sizes to avoid building +that same package multiple times. This flag can help identify the offending +packages. You can then investigate if the package that depends on the +duplicate with the older version can be updated to the newer version so that +only one instance is built. +.RE +.sp +\fB\-e\fP \fIKINDS\fP, \fB\-\-edges\fP \fIKINDS\fP +.RS 4 +The dependency kinds to display. Takes a comma separated list of values: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBall\fP — Show all edge kinds. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBnormal\fP — Show normal dependencies. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBbuild\fP — Show build dependencies. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBdev\fP — Show development dependencies. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBfeatures\fP — Show features enabled by each dependency. If this is +the only kind given, then it will automatically include the other +dependency kinds. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBno\-normal\fP — Do not include normal dependencies. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBno\-build\fP — Do not include build dependencies. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBno\-dev\fP — Do not include development dependencies. +.RE +.sp +The \fBno\-\fP prefixed options cannot be mixed with the other dependency kinds. +.sp +The default is \fBnormal,build,dev\fP. +.RE +.sp +\fB\-\-target\fP \fITRIPLE\fP +.RS 4 +Filter dependencies matching the given target\-triple. +The default is the host platform. Use the value \fBall\fP to include \fBall\fP +targets. +.RE +.SS "Tree Formatting Options" +.sp +\fB\-\-charset\fP \fICHARSET\fP +.RS 4 +Chooses the character set to use for the tree. Valid values are "utf8" or +"ascii". Default is "utf8". 
+.RE +.sp +\fB\-f\fP \fIFORMAT\fP, \fB\-\-format\fP \fIFORMAT\fP +.RS 4 +Set the format string for each package. The default is "{p}". +.sp +This is an arbitrary string which will be used to display each package. The following +strings will be replaced with the corresponding value: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fB{p}\fP — The package name. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fB{l}\fP — The package license. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fB{r}\fP — The package repository URL. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fB{f}\fP — Comma\-separated list of package features that are enabled. +.RE +.RE +.sp +\fB\-\-prefix\fP \fIPREFIX\fP +.RS 4 +Sets how each line is displayed. The \fIPREFIX\fP value can be one of: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBindent\fP (default) — Shows each line indented as a tree. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBdepth\fP — Show as a list, with the numeric depth printed before each entry. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBnone\fP — Show as a flat list. +.RE +.RE +.SS "Package Selection" +.sp +By default, when no package selection options are given, the packages selected +depend on the selected manifest file (based on the current working directory if +\fB\-\-manifest\-path\fP is not given). If the manifest is the root of a workspace then +the workspace\(cqs default members are selected, otherwise only the package defined +by the manifest will be selected. +.sp +The default members of a workspace can be set explicitly with the +\fBworkspace.default\-members\fP key in the root manifest. If this is not set, a +virtual workspace will include all workspace members (equivalent to passing +\fB\-\-workspace\fP), and a non\-virtual workspace will include only the root crate itself. +.sp +\fB\-p\fP \fISPEC\fP..., \fB\-\-package\fP \fISPEC\fP... +.RS 4 +Display only the specified packages. See \fBcargo\-pkgid\fP(1) for the +SPEC format. This flag may be specified multiple times. +.RE +.sp +\fB\-\-workspace\fP +.RS 4 +Display all members in the workspace. +.RE +.sp +\fB\-\-exclude\fP \fISPEC\fP... +.RS 4 +Exclude the specified packages. Must be used in conjunction with the +\fB\-\-workspace\fP flag. This flag may be specified multiple times. +.RE +.SS "Manifest Options" +.sp +\fB\-\-manifest\-path\fP \fIPATH\fP +.RS 4 +Path to the \fBCargo.toml\fP file. By default, Cargo searches for the +\fBCargo.toml\fP file in the current directory or any parent directory. +.RE +.SS "Feature Selection" +.sp +The feature flags allow you to control the enabled features for the "current" +package. The "current" package is the package in the current directory, or the +one specified in \fB\-\-manifest\-path\fP. If running in the root of a virtual +workspace, then the default features are selected for all workspace members, +or all features if \fB\-\-all\-features\fP is specified. +.sp +When no feature options are given, the \fBdefault\fP feature is activated for +every selected package. +.sp +\fB\-\-features\fP \fIFEATURES\fP +.RS 4 +Space or comma separated list of features to activate. These features only +apply to the current directory\(cqs package.
Features of direct dependencies +may be enabled with \fB/\fP syntax. This flag may be +specified multiple times, which enables all specified features. +.RE +.sp +\fB\-\-all\-features\fP +.RS 4 +Activate all available features of all selected packages. +.RE +.sp +\fB\-\-no\-default\-features\fP +.RS 4 +Do not activate the \fBdefault\fP feature of the current directory\(cqs +package. +.RE +.SS "Display Options" +.sp +\fB\-v\fP, \fB\-\-verbose\fP +.RS 4 +Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the \fBterm.verbose\fP +.URL "https://doc.rust\-lang.org/cargo/reference/config.html" "config value" "." +.RE +.sp +\fB\-q\fP, \fB\-\-quiet\fP +.RS 4 +No output printed to stdout. +.RE +.sp +\fB\-\-color\fP \fIWHEN\fP +.RS 4 +Control when colored output is used. Valid values: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBauto\fP (default): Automatically detect if color support is available on the +terminal. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBalways\fP: Always display colors. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +. sp -1 +. IP \(bu 2.3 +.\} +\fBnever\fP: Never display colors. +.RE +.sp +May also be specified with the \fBterm.color\fP +.URL "https://doc.rust\-lang.org/cargo/reference/config.html" "config value" "." +.RE +.SS "Common Options" +.sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp +\fB\-h\fP, \fB\-\-help\fP +.RS 4 +Prints help information. +.RE +.sp +\fB\-Z\fP \fIFLAG\fP... +.RS 4 +Unstable (nightly\-only) flags to Cargo. Run \fBcargo \-Z help\fP for +details. +.RE +.sp +\fB\-\-frozen\fP, \fB\-\-locked\fP +.RS 4 +Either of these flags requires that the \fBCargo.lock\fP file is +up\-to\-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The \fB\-\-frozen\fP flag also prevents Cargo from +attempting to access the network to determine if it is out\-of\-date. +.sp +These may be used in environments where you want to assert that the +\fBCargo.lock\fP file is up\-to\-date (such as a CI build) or want to avoid network +access. +.RE +.sp +\fB\-\-offline\fP +.RS 4 +Prevents Cargo from accessing the network for any reason. Without this +flag, Cargo will stop with an error if it needs to access the network and +the network is not available. With this flag, Cargo will attempt to +proceed without the network if possible. +.sp +Beware that this may result in different dependency resolution than online +mode. Cargo will restrict itself to crates that are downloaded locally, even +if there might be a newer version as indicated in the local copy of the index. +See the \fBcargo\-fetch\fP(1) command to download dependencies before going +offline. +.sp +May also be specified with the \fBnet.offline\fP \c +.URL "https://doc.rust\-lang.org/cargo/reference/config.html" "config value" "." +.RE +.SH "ENVIRONMENT" +.sp +See \c +.URL "https://doc.rust\-lang.org/cargo/reference/environment\-variables.html" "the reference" " " +for +details on environment variables that Cargo reads. 
+.SH "EXIT STATUS" +.sp +0 +.RS 4 +Cargo succeeded. +.RE +.sp +101 +.RS 4 +Cargo failed to complete. +.RE +.SH "EXAMPLES" +.sp +.RS 4 +.ie n \{\ +\h'-04' 1.\h'+01'\c +.\} +.el \{\ +. sp -1 +. IP " 1." 4.2 +.\} +Display the tree for the package in the current directory: +.sp +.if n .RS 4 +.nf +cargo tree +.fi +.if n .RE +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04' 2.\h'+01'\c +.\} +.el \{\ +. sp -1 +. IP " 2." 4.2 +.\} +Display all the packages that depend on the \fBsyn\fP package: +.sp +.if n .RS 4 +.nf +cargo tree \-i syn +.fi +.if n .RE +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04' 3.\h'+01'\c +.\} +.el \{\ +. sp -1 +. IP " 3." 4.2 +.\} +Show the features enabled on each package: +.sp +.if n .RS 4 +.nf +cargo tree \-\-format "{p} {f}" +.fi +.if n .RE +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04' 4.\h'+01'\c +.\} +.el \{\ +. sp -1 +. IP " 4." 4.2 +.\} +Show all packages that are built multiple times. This can happen if multiple +semver\-incompatible versions appear in the tree (like 1.0.0 and 2.0.0). +.sp +.if n .RS 4 +.nf +cargo tree \-d +.fi +.if n .RE +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04' 5.\h'+01'\c +.\} +.el \{\ +. sp -1 +. IP " 5." 4.2 +.\} +Explain why features are enabled for the \fBsyn\fP package: +.sp +.if n .RS 4 +.nf +cargo tree \-e features \-i syn +.fi +.if n .RE +.sp +The \fB\-e features\fP flag is used to show features. The \fB\-i\fP flag is used to +invert the graph so that it displays the packages that depend on \fBsyn\fP. An +example of what this would display: +.sp +.if n .RS 4 +.nf +syn v1.0.17 +|\-\- syn feature "clone\-impls" +| `\-\- syn feature "default" +| `\-\- rustversion v1.0.2 +| `\-\- rustversion feature "default" +| `\-\- myproject v0.1.0 (/myproject) +| `\-\- myproject feature "default" (command\-line) +|\-\- syn feature "default" (*) +|\-\- syn feature "derive" +| `\-\- syn feature "default" (*) +|\-\- syn feature "full" +| `\-\- rustversion v1.0.2 (*) +|\-\- syn feature "parsing" +| `\-\- syn feature "default" (*) +|\-\- syn feature "printing" +| `\-\- syn feature "default" (*) +|\-\- syn feature "proc\-macro" +| `\-\- syn feature "default" (*) +`\-\- syn feature "quote" + |\-\- syn feature "printing" (*) + `\-\- syn feature "proc\-macro" (*) +.fi +.if n .RE +.sp +To read this graph, you can follow the chain for each feature from the root to +see why it is included. For example, the "full" feature is added by the +\fBrustversion\fP crate which is included from \fBmyproject\fP (with the default +features), and \fBmyproject\fP is the package selected on the command\-line. All +of the other \fBsyn\fP features are added by the "default" feature ("quote" is +added by "printing" and "proc\-macro", both of which are default features). +.sp +If you\(cqre having difficulty cross\-referencing the de\-duplicated \fB(*)\fP entries, +try with the \fB\-\-no\-dedupe\fP flag to get the full output. 
+.RE +.SH "SEE ALSO" +.sp +\fBcargo\fP(1), \fBcargo\-metadata\fP(1) \ No newline at end of file diff -Nru cargo-0.44.1/src/etc/man/cargo-uninstall.1 cargo-0.47.0/src/etc/man/cargo-uninstall.1 --- cargo-0.44.1/src/etc/man/cargo-uninstall.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-uninstall.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-uninstall .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-UNINSTALL" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-UNINSTALL" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -173,6 +173,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-update.1 cargo-0.47.0/src/etc/man/cargo-update.1 --- cargo-0.44.1/src/etc/man/cargo-update.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-update.1 2020-07-17 20:39:39.000000000 +0000 @@ -165,6 +165,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-vendor.1 cargo-0.47.0/src/etc/man/cargo-vendor.1 --- cargo-0.44.1/src/etc/man/cargo-vendor.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-vendor.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-vendor .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-12-09 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-VENDOR" "1" "2019-12-09" "\ \&" "\ \&" +.TH "CARGO\-VENDOR" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -41,9 +41,9 @@ specified with the \fB\-s\fP option. .sp The \fBcargo vendor\fP command will also print out the configuration necessary -to use the vendored sources, which you will need to add to \fB.cargo/config\fP. +to use the vendored sources, which you will need to add to \fB.cargo/config.toml\fP. 
.SH "OPTIONS" -.SS "Owner Options" +.SS "Vendor Options" .sp \fB\-s\fP \fIMANIFEST\fP, \fB\-\-sync\fP \fIMANIFEST\fP .RS 4 @@ -59,7 +59,7 @@ .sp \fB\-\-respect\-source\-config\fP .RS 4 -Instead of ignoring \fB[source]\fP configuration by default in \fB.cargo/config\fP +Instead of ignoring \fB[source]\fP configuration by default in \fB.cargo/config.toml\fP read it and use it when downloading crates from crates.io, for example .RE .sp @@ -136,6 +136,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-verify-project.1 cargo-0.47.0/src/etc/man/cargo-verify-project.1 --- cargo-0.44.1/src/etc/man/cargo-verify-project.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-verify-project.1 2020-07-17 20:39:39.000000000 +0000 @@ -145,6 +145,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. +.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/src/etc/man/cargo-version.1 cargo-0.47.0/src/etc/man/cargo-version.1 --- cargo-0.44.1/src/etc/man/cargo-version.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-version.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-version .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2019-09-05 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-VERSION" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-VERSION" "1" "2019-09-05" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 diff -Nru cargo-0.44.1/src/etc/man/cargo-yank.1 cargo-0.47.0/src/etc/man/cargo-yank.1 --- cargo-0.44.1/src/etc/man/cargo-yank.1 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/src/etc/man/cargo-yank.1 2020-07-17 20:39:39.000000000 +0000 @@ -2,12 +2,12 @@ .\" Title: cargo-yank .\" Author: [see the "AUTHOR(S)" section] .\" Generator: Asciidoctor 2.0.10 -.\" Date: 2019-06-03 +.\" Date: 2020-06-25 .\" Manual: \ \& .\" Source: \ \& .\" Language: English .\" -.TH "CARGO\-YANK" "1" "2019-06-03" "\ \&" "\ \&" +.TH "CARGO\-YANK" "1" "2020-06-25" "\ \&" "\ \&" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 @@ -48,7 +48,7 @@ If the crate name is not specified, it will use the package name from the current directory. .SH "OPTIONS" -.SS "Owner Options" +.SS "Yank Options" .sp \fB\-\-vers\fP \fIVERSION\fP .RS 4 @@ -144,6 +144,16 @@ .RE .SS "Common Options" .sp +\fB+TOOLCHAIN\fP +.RS 4 +If Cargo has been installed with rustup, and the first argument to \fBcargo\fP +begins with \fB+\fP, it will be interpreted as a rustup toolchain name (such +as \fB+stable\fP or \fB+nightly\fP). +See the \c +.URL "https://github.com/rust\-lang/rustup/" "rustup documentation" +for more information about how toolchain overrides work. 
+.RE +.sp \fB\-h\fP, \fB\-\-help\fP .RS 4 Prints help information. diff -Nru cargo-0.44.1/tests/build-std/main.rs cargo-0.47.0/tests/build-std/main.rs --- cargo-0.44.1/tests/build-std/main.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/build-std/main.rs 2020-07-17 20:39:39.000000000 +0000 @@ -109,6 +109,14 @@ p.cargo("build").build_std().target_host().run(); p.cargo("run").build_std().target_host().run(); p.cargo("test").build_std().target_host().run(); + + // Check for hack that removes dylibs. + let deps_dir = Path::new("target") + .join(rustc_host()) + .join("debug") + .join("deps"); + assert!(p.glob(deps_dir.join("*.rlib")).count() > 0); + assert_eq!(p.glob(deps_dir.join("*.dylib")).count(), 0); } #[cargo_test(build_std)] diff -Nru cargo-0.44.1/tests/internal.rs cargo-0.47.0/tests/internal.rs --- cargo-0.44.1/tests/internal.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/internal.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,96 @@ +//! Tests for internal code checks. +use std::fs; + +#[test] +fn check_forbidden_code() { + // Do not use certain macros, functions, etc. + if !cargo::util::is_ci() { + // Only check these on CI, otherwise it could be annoying. + return; + } + let path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("src"); + for entry in walkdir::WalkDir::new(path) + .into_iter() + .filter_map(|e| e.ok()) + { + let path = entry.path(); + if !entry + .file_name() + .to_str() + .map(|s| s.ends_with(".rs")) + .unwrap_or(false) + { + continue; + } + let c = fs::read_to_string(path).unwrap(); + for (line_index, line) in c.lines().enumerate() { + if line.trim().starts_with("//") { + continue; + } + if line_has_print(line) { + if entry.file_name().to_str().unwrap() == "cargo_new.rs" && line.contains("Hello") { + // An exception. + continue; + } + panic!( + "found print macro in {}:{}\n\n{}\n\n\ + print! macros should not be used in Cargo because they can panic.\n\ + Use one of the drop_print macros instead.\n\ + ", + path.display(), + line_index, + line + ); + } + if line_has_macro(line, "dbg") { + panic!( + "found dbg! macro in {}:{}\n\n{}\n\n\ + dbg! should not be used outside of debugging.", + path.display(), + line_index, + line + ); + } + } + } +} + +fn line_has_print(line: &str) -> bool { + line_has_macro(line, "print") + || line_has_macro(line, "eprint") + || line_has_macro(line, "println") + || line_has_macro(line, "eprintln") +} + +#[test] +fn line_has_print_works() { + assert!(line_has_print("print!")); + assert!(line_has_print("println!")); + assert!(line_has_print("eprint!")); + assert!(line_has_print("eprintln!")); + assert!(line_has_print("(print!(\"hi!\"))")); + assert!(!line_has_print("print")); + assert!(!line_has_print("i like to print things")); + assert!(!line_has_print("drop_print!")); + assert!(!line_has_print("drop_println!")); + assert!(!line_has_print("drop_eprint!")); + assert!(!line_has_print("drop_eprintln!")); +} + +fn line_has_macro(line: &str, mac: &str) -> bool { + for (i, _) in line.match_indices(mac) { + if line.get(i + mac.len()..i + mac.len() + 1) != Some("!") { + continue; + } + if i == 0 { + return true; + } + // Check for identifier boundary start. 
+ let prev1 = line.get(i - 1..i).unwrap().chars().next().unwrap(); + if prev1.is_alphanumeric() || prev1 == '_' { + continue; + } + return true; + } + false +} diff -Nru cargo-0.44.1/tests/testsuite/alt_registry.rs cargo-0.47.0/tests/testsuite/alt_registry.rs --- cargo-0.44.1/tests/testsuite/alt_registry.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/alt_registry.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,8 +4,7 @@ use cargo_test_support::publish::validate_alt_upload; use cargo_test_support::registry::{self, Package}; use cargo_test_support::{basic_manifest, git, paths, project}; -use std::fs::{self, File}; -use std::io::Write; +use std::fs; #[cargo_test] fn depend_on_alt_registry() { @@ -299,7 +298,7 @@ .with_stderr_contains("[ERROR] crates cannot be published to crates.io[..]") .run(); - p.cargo("publish --index") + p.cargo("publish --token sekrit --index") .arg(fakeio_url.to_string()) .with_status(101) .with_stderr_contains("[ERROR] crates cannot be published to crates.io[..]") @@ -414,17 +413,18 @@ #[cargo_test] fn block_publish_due_to_no_token() { - let p = project().file("src/main.rs", "fn main() {}").build(); - - // Setup the registry by publishing a package - Package::new("bar", "0.0.1").alternative(true).publish(); + registry::init(); + let p = project().file("src/lib.rs", "").build(); fs::remove_file(paths::home().join(".cargo/credentials")).unwrap(); // Now perform the actual publish p.cargo("publish --registry alternative") .with_status(101) - .with_stderr_contains("error: no upload token found, please run `cargo login`") + .with_stderr_contains( + "error: no upload token found, \ + please run `cargo login` or pass `--token`", + ) .run(); } @@ -530,50 +530,32 @@ } #[cargo_test] -fn passwords_in_registry_index_url_forbidden() { - registry::init(); - - let config = paths::home().join(".cargo/config"); - - File::create(config) - .unwrap() - .write_all( - br#" - [registry] - index = "ssh://git:secret@foobar.com" - "#, - ) - .unwrap(); - - let p = project().file("src/main.rs", "fn main() {}").build(); - - p.cargo("publish") - .with_status(101) - .with_stderr_contains("error: Registry URLs may not contain passwords") - .run(); -} - -#[cargo_test] fn passwords_in_registries_index_url_forbidden() { registry::init(); let config = paths::home().join(".cargo/config"); - File::create(config) - .unwrap() - .write_all( - br#" + fs::write( + config, + r#" [registries.alternative] index = "ssh://git:secret@foobar.com" "#, - ) - .unwrap(); + ) + .unwrap(); let p = project().file("src/main.rs", "fn main() {}").build(); p.cargo("publish --registry alternative") .with_status(101) - .with_stderr_contains("error: Registry URLs may not contain passwords") + .with_stderr( + "\ +error: invalid index URL for registry `alternative` defined in [..]/home/.cargo/config + +Caused by: + registry URLs may not contain passwords +", + ) .run(); } @@ -644,7 +626,7 @@ [ERROR] failed to parse manifest at `[CWD]/Cargo.toml` Caused by: - Invalid character ` ` in registry name: `bad name`", + invalid character ` ` in registry name: `bad name`, [..]", ) .run(); @@ -661,7 +643,7 @@ .arg("--registry") .arg("bad name") .with_status(101) - .with_stderr("[ERROR] Invalid character ` ` in registry name: `bad name`") + .with_stderr("[ERROR] invalid character ` ` in registry name: `bad name`, [..]") .run(); } } @@ -842,7 +824,8 @@ "resolve": null, "target_directory": "[..]/foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ 
-1021,7 +1004,8 @@ "resolve": "{...}", "target_directory": "[..]/foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -1170,7 +1154,8 @@ "resolve": "{...}", "target_directory": "[..]/foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null } "#, ) @@ -1181,15 +1166,14 @@ fn registries_index_relative_url() { let config = paths::root().join(".cargo/config"); fs::create_dir_all(config.parent().unwrap()).unwrap(); - File::create(&config) - .unwrap() - .write_all( - br#" + fs::write( + &config, + r#" [registries.relative] index = "file:alternative-registry" "#, - ) - .unwrap(); + ) + .unwrap(); registry::init(); @@ -1228,70 +1212,17 @@ } #[cargo_test] -fn registry_index_relative_url() { - let config = paths::root().join(".cargo/config"); - fs::create_dir_all(config.parent().unwrap()).unwrap(); - File::create(&config) - .unwrap() - .write_all( - br#" - [registry] - index = "file:alternative-registry" - "#, - ) - .unwrap(); - - registry::init(); - - let p = project() - .file( - "Cargo.toml", - r#" - [project] - name = "foo" - version = "0.0.1" - authors = [] - - [dependencies.bar] - version = "0.0.1" - "#, - ) - .file("src/main.rs", "fn main() {}") - .build(); - - Package::new("bar", "0.0.1").alternative(true).publish(); - - fs::remove_file(paths::home().join(".cargo/config")).unwrap(); - - p.cargo("build") - .with_stderr(&format!( - "\ -warning: custom registry support via the `registry.index` configuration is being removed, this functionality will not work in the future -[UPDATING] `{reg}` index -[DOWNLOADING] crates ... -[DOWNLOADED] bar v0.0.1 (registry `[ROOT][..]`) -[COMPILING] bar v0.0.1 (registry `[ROOT][..]`) -[COMPILING] foo v0.0.1 ([CWD]) -[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]s -", - reg = registry::alt_registry_path().to_str().unwrap() - )) - .run(); -} - -#[cargo_test] fn registries_index_relative_path_not_allowed() { let config = paths::root().join(".cargo/config"); fs::create_dir_all(config.parent().unwrap()).unwrap(); - File::create(&config) - .unwrap() - .write_all( - br#" + fs::write( + &config, + r#" [registries.relative] index = "alternative-registry" "#, - ) - .unwrap(); + ) + .unwrap(); registry::init(); @@ -1320,6 +1251,9 @@ error: failed to parse manifest at `{root}/foo/Cargo.toml` Caused by: + invalid index URL for registry `relative` defined in [..]/.cargo/config + +Caused by: invalid url `alternative-registry`: relative URL without a base ", root = paths::root().to_str().unwrap() @@ -1327,3 +1261,19 @@ .with_status(101) .run(); } + +#[cargo_test] +fn both_index_and_registry() { + let p = project().file("src/lib.rs", "").build(); + for cmd in &["publish", "owner", "search", "yank --vers 1.0.0"] { + p.cargo(cmd) + .arg("--registry=foo") + .arg("--index=foo") + .with_status(101) + .with_stderr( + "[ERROR] both `--index` and `--registry` \ + should not be set at the same time", + ) + .run(); + } +} diff -Nru cargo-0.44.1/tests/testsuite/bad_config.rs cargo-0.47.0/tests/testsuite/bad_config.rs --- cargo-0.44.1/tests/testsuite/bad_config.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/bad_config.rs 2020-07-17 20:39:39.000000000 +0000 @@ -862,6 +862,36 @@ } #[cargo_test] +fn fragment_in_git_url() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.0" + authors = [] + + [dependencies.bar] + git = "http://127.0.0.1#foo" + "#, + ) + .file("src/lib.rs", "") + .build(); 
+ + p.cargo("build -v") + .with_status(101) + .with_stderr_contains( + "\ +[WARNING] URL fragment `#foo` in git URL is ignored for dependency (bar). \ +If you were trying to specify a specific git revision, \ +use `rev = \"foo\"` in the dependency declaration. +", + ) + .run(); +} + +#[cargo_test] fn bad_source_config1() { let p = project() .file("src/lib.rs", "") @@ -1368,8 +1398,8 @@ Caused by: invalid configuration for key `target.cfg(not(target_os = \"none\")).runner` -expected a string or array of strings, but found a boolean for \ -`target.cfg(not(target_os = \"none\")).runner` in [..]/foo/.cargo/config + expected a string or array of strings, but found a boolean for \ + `target.cfg(not(target_os = \"none\")).runner` in [..]/foo/.cargo/config ", ) .run(); diff -Nru cargo-0.44.1/tests/testsuite/bench.rs cargo-0.47.0/tests/testsuite/bench.rs --- cargo-0.44.1/tests/testsuite/bench.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/bench.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1612,9 +1612,9 @@ .with_json( r#" { - "executable": "[..]/foo/target/release/benchmark-[..][EXE]", + "executable": "[..]/foo/target/release/deps/benchmark-[..][EXE]", "features": [], - "filenames": [ "[..]/foo/target/release/benchmark-[..][EXE]" ], + "filenames": "{...}", "fresh": false, "package_id": "foo 0.0.1 ([..])", "profile": "{...}", @@ -1628,6 +1628,8 @@ "src_path": "[..]/foo/benches/benchmark.rs" } } + + {"reason": "build-finished", "success": true} "#, ) .run(); diff -Nru cargo-0.44.1/tests/testsuite/build_plan.rs cargo-0.47.0/tests/testsuite/build_plan.rs --- cargo-0.44.1/tests/testsuite/build_plan.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/build_plan.rs 2020-07-17 20:39:39.000000000 +0000 @@ -24,7 +24,7 @@ "cwd": "[..]/cit/[..]/foo", "deps": [], "env": "{...}", - "kind": "Host", + "kind": null, "links": "{...}", "outputs": "{...}", "package_name": "foo", @@ -84,7 +84,7 @@ "cwd": "[..]/cit/[..]/foo", "deps": [], "env": "{...}", - "kind": "Host", + "kind": null, "links": "{...}", "outputs": [ "[..]/foo/target/debug/deps/libbar-[..].rlib", @@ -101,7 +101,7 @@ "cwd": "[..]/cit/[..]/foo", "deps": [0], "env": "{...}", - "kind": "Host", + "kind": null, "links": "{...}", "outputs": [ "[..]/foo/target/debug/deps/libfoo-[..].rlib", @@ -152,11 +152,9 @@ "cwd": "[..]/cit/[..]/foo", "deps": [], "env": "{...}", - "kind": "Host", + "kind": null, "links": "{...}", - "outputs": [ - "[..]/foo/target/debug/build/[..]/build_script_build-[..]" - ], + "outputs": "{...}", "package_name": "foo", "package_version": "0.5.0", "program": "rustc", @@ -168,7 +166,7 @@ "cwd": "[..]/cit/[..]/foo", "deps": [0], "env": "{...}", - "kind": "Host", + "kind": null, "links": "{...}", "outputs": [], "package_name": "foo", @@ -182,7 +180,7 @@ "cwd": "[..]/cit/[..]/foo", "deps": [1], "env": "{...}", - "kind": "Host", + "kind": null, "links": "{...}", "outputs": "{...}", "package_name": "foo", diff -Nru cargo-0.44.1/tests/testsuite/build.rs cargo-0.47.0/tests/testsuite/build.rs --- cargo-0.44.1/tests/testsuite/build.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/build.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,15 +1,19 @@ //! Tests for the `cargo build` command. 
-use cargo::util::paths::dylib_path_envvar; +use cargo::{ + core::compiler::CompileMode, core::Workspace, ops::CompileOptions, + util::paths::dylib_path_envvar, Config, +}; use cargo_test_support::paths::{root, CargoPathExt}; use cargo_test_support::registry::Package; use cargo_test_support::{ - basic_bin_manifest, basic_lib_manifest, basic_manifest, main_file, project, rustc_host, - sleep_ms, symlink_supported, t, Execs, ProjectBuilder, + basic_bin_manifest, basic_lib_manifest, basic_manifest, lines_match, main_file, project, + rustc_host, sleep_ms, symlink_supported, t, Execs, ProjectBuilder, }; use std::env; -use std::fs::{self, File}; -use std::io::prelude::*; +use std::fs; +use std::io::Read; +use std::process::Stdio; #[cargo_test] fn cargo_compile_simple() { @@ -299,7 +303,7 @@ [ERROR] failed to parse manifest at `[..]` Caused by: - Invalid character `:` in package name: `foo::bar` + invalid character `:` in package name: `foo::bar`, [..] ", ) .run(); @@ -399,6 +403,55 @@ } #[cargo_test] +fn cargo_compile_api_exposes_artifact_paths() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + authors = [] + version = "0.0.0" + + [[bin]] + name = "the_foo_bin" + path = "src/bin.rs" + + [lib] + name = "the_foo_lib" + path = "src/foo.rs" + crate-type = ["cdylib", "rlib"] + "#, + ) + .file("src/foo.rs", "pub fn bar() {}") + .file("src/bin.rs", "pub fn main() {}") + .build(); + + let config = Config::default().unwrap(); + let ws = Workspace::new(&p.root().join("Cargo.toml"), &config).unwrap(); + let compile_options = CompileOptions::new(ws.config(), CompileMode::Build).unwrap(); + + let result = cargo::ops::compile(&ws, &compile_options).unwrap(); + + assert_eq!(1, result.binaries.len()); + assert!(result.binaries[0].1.exists()); + assert!(result.binaries[0] + .1 + .to_str() + .unwrap() + .contains("the_foo_bin")); + + assert_eq!(1, result.cdylibs.len()); + // The exact library path varies by platform, but should certainly exist at least + assert!(result.cdylibs[0].1.exists()); + assert!(result.cdylibs[0] + .1 + .to_str() + .unwrap() + .contains("the_foo_lib")); +} + +#[cargo_test] fn cargo_compile_with_bin_and_proc() { let p = project() .file( @@ -911,6 +964,37 @@ .run(); } +// Ensure that renamed deps have a valid name +#[cargo_test] +fn cargo_compile_with_invalid_dep_rename() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "buggin" + version = "0.1.0" + + [dependencies] + "haha this isn't a valid name 🐛" = { package = "libc", version = "0.1" } + "#, + ) + .file("src/main.rs", &main_file(r#""What's good?""#, &[])) + .build(); + + p.cargo("build") + .with_status(101) + .with_stderr( + "\ +error: failed to parse manifest at `[..]` + +Caused by: + invalid character ` ` in dependency name: `haha this isn't a valid name 🐛`, characters must be Unicode XID characters (numbers, `-`, `_`, or most letters) +", + ) + .run(); +} + #[cargo_test] fn cargo_compile_with_filename() { let p = project() @@ -993,7 +1077,7 @@ error: failed to select a version for `bad`. ... required by package `qux v0.1.0` ... which is depended on by `foo v0.0.1 ([..])` -versions that meet the requirements `>= 1.0.1` are: 1.0.2, 1.0.1 +versions that meet the requirements `>=1.0.1` are: 1.0.2, 1.0.1 all possible versions conflict with previously selected packages. @@ -1038,7 +1122,7 @@ "\ error: failed to select a version for `bad`. ... 
required by package `foo v0.0.1 ([..])` -versions that meet the requirements `>= 1.0.1, <= 2.0.0` are: 2.0.0, 1.0.1 +versions that meet the requirements `>=1.0.1, <=2.0.0` are: 2.0.0, 1.0.1 all possible versions conflict with previously selected packages. @@ -1077,10 +1161,7 @@ p.cargo("build").run(); - File::create(&p.root().join("bar/Cargo.toml")) - .unwrap() - .write_all(basic_manifest("bar", "0.0.2").as_bytes()) - .unwrap(); + p.change_file("bar/Cargo.toml", &basic_manifest("bar", "0.0.2")); p.cargo("build").run(); } @@ -1094,17 +1175,8 @@ p.cargo("build").run(); - let lockfile = p.root().join("Cargo.lock"); - let mut lock = String::new(); - File::open(&lockfile) - .unwrap() - .read_to_string(&mut lock) - .unwrap(); - let lock = lock.replace("\n", "\r\n"); - File::create(&lockfile) - .unwrap() - .write_all(lock.as_bytes()) - .unwrap(); + let lock = p.read_lockfile(); + p.change_file("Cargo.lock", &lock.replace("\n", "\r\n")); p.cargo("build").run(); } @@ -1149,13 +1221,13 @@ [COMPILING] bar v0.0.1 ([CWD]/bar) [RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type dylib \ --emit=[..]link \ - -C prefer-dynamic -C debuginfo=2 \ + -C prefer-dynamic[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ -C extra-filename=[..] \ --out-dir [..] \ @@ -1177,13 +1249,13 @@ [COMPILING] bar v0.0.1 ([CWD]/bar) [RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type dylib \ --emit=[..]link \ - -C prefer-dynamic -C debuginfo=2 \ + -C prefer-dynamic[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ -C extra-filename=[..] \ --out-dir [..] 
\ @@ -1210,6 +1282,12 @@ homepage = "https://example.com" repository = "https://example.com/repo.git" authors = ["wycats@example.com"] + license = "MIT OR Apache-2.0" + license_file = "license.txt" + + [[bin]] + name = "foo-bar" + path = "src/main.rs" "#, ) .file( @@ -1227,7 +1305,12 @@ static PKG_NAME: &'static str = env!("CARGO_PKG_NAME"); static HOMEPAGE: &'static str = env!("CARGO_PKG_HOMEPAGE"); static REPOSITORY: &'static str = env!("CARGO_PKG_REPOSITORY"); + static LICENSE: &'static str = env!("CARGO_PKG_LICENSE"); + static LICENSE_FILE: &'static str = env!("CARGO_PKG_LICENSE_FILE"); static DESCRIPTION: &'static str = env!("CARGO_PKG_DESCRIPTION"); + static BIN_NAME: &'static str = env!("CARGO_BIN_NAME"); + static CRATE_NAME: &'static str = env!("CARGO_CRATE_NAME"); + fn main() { let s = format!("{}-{}-{} @ {} in {}", VERSION_MAJOR, @@ -1236,8 +1319,11 @@ assert_eq!(s, foo::version()); println!("{}", s); assert_eq!("foo", PKG_NAME); + assert_eq!("foo-bar", BIN_NAME); + assert_eq!("foo_bar", CRATE_NAME); assert_eq!("https://example.com", HOMEPAGE); assert_eq!("https://example.com/repo.git", REPOSITORY); + assert_eq!("MIT OR Apache-2.0", LICENSE); assert_eq!("This is foo", DESCRIPTION); let s = format!("{}.{}.{}-{}", VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, VERSION_PRE); @@ -1264,7 +1350,7 @@ p.cargo("build -v").run(); println!("bin"); - p.process(&p.bin("foo")) + p.process(&p.bin("foo-bar")) .with_stdout("0-5-1 @ alpha.1 in [CWD]") .run(); @@ -1581,7 +1667,7 @@ "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` @@ -1599,8 +1685,8 @@ "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link \ - -C opt-level=3 \ + --emit=[..]link[..]\ + -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/release/deps` @@ -1650,15 +1736,15 @@ [RUNNING] `rustc --crate-name foo foo/src/lib.rs [..]\ --crate-type dylib --crate-type rlib \ --emit=[..]link \ - -C prefer-dynamic \ - -C opt-level=3 \ + -C prefer-dynamic[..]\ + -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/release/deps` [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ - --emit=[..]link \ - -C opt-level=3 \ + --emit=[..]link[..]\ + -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [..] 
\ -L dependency=[CWD]/target/release/deps \ @@ -2133,7 +2219,7 @@ // Modify an ignored file and make sure we don't rebuild println!("second pass"); - File::create(&foo.root().join("src/bar.rs")).unwrap(); + foo.change_file("src/bar.rs", ""); foo.cargo("build").with_stdout("").run(); } @@ -2181,7 +2267,7 @@ ) .run(); - File::create(&foo.root().join("src/bar.rs")).unwrap(); + foo.change_file("src/bar.rs", ""); foo.cargo("build") .with_stderr( "\ @@ -2249,11 +2335,12 @@ let credentials = home().join(".cargo/credentials"); t!(fs::create_dir_all(credentials.parent().unwrap())); - t!(t!(File::create(&credentials)).write_all( - br#" - [registry] - token = "api-token" - "# + t!(fs::write( + &credentials, + r#" + [registry] + token = "api-token" + "# )); let stat = fs::metadata(credentials.as_path()).unwrap(); let mut perms = stat.permissions(); @@ -2414,12 +2501,7 @@ assert!(p.bin("foo").is_file()); p.process(&p.bin("foo")).run(); - let loc = p.root().join("Cargo.lock"); - let mut lockfile = String::new(); - File::open(&loc) - .unwrap() - .read_to_string(&mut lockfile) - .unwrap(); + let lockfile = p.read_lockfile(); assert!(lockfile.contains("bar")); } @@ -2837,16 +2919,13 @@ .run(); assert!(p.root().join("foo2/target/debug").join(&exe_name).is_file()); - fs::create_dir(p.root().join(".cargo")).unwrap(); - File::create(p.root().join(".cargo/config")) - .unwrap() - .write_all( - br#" - [build] - target-dir = "foo/target" - "#, - ) - .unwrap(); + p.change_file( + ".cargo/config", + r#" + [build] + target-dir = "foo/target" + "#, + ); p.cargo("build").env("CARGO_TARGET_DIR", "bar/target").run(); assert!(p.root().join("bar/target/debug").join(&exe_name).is_file()); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); @@ -2867,16 +2946,13 @@ assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); assert!(p.root().join("target/debug").join(&exe_name).is_file()); - fs::create_dir(p.root().join(".cargo")).unwrap(); - File::create(p.root().join(".cargo/config")) - .unwrap() - .write_all( - br#" - [build] - target-dir = "foo/target" - "#, - ) - .unwrap(); + p.change_file( + ".cargo/config", + r#" + [build] + target-dir = "foo/target" + "#, + ); p.cargo("build --target-dir bar/target").run(); assert!(p.root().join("bar/target/debug").join(&exe_name).is_file()); assert!(p.root().join("foo/target/debug").join(&exe_name).is_file()); @@ -3169,6 +3245,8 @@ "filenames": "{...}", "fresh": $FRESH } + + {"reason": "build-finished", "success": true} "# .replace("$FRESH", fresh) }; @@ -3249,6 +3327,8 @@ "filenames": "{...}", "fresh": false } + + {"reason": "build-finished", "success": true} "#, ) .run(); @@ -3285,6 +3365,43 @@ } #[cargo_test] +fn no_warn_about_workspace_metadata() { + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["foo"] + + [workspace.metadata] + something = "something_else" + x = 1 + y = 2 + + [workspace.metadata.another] + bar = 12 + "#, + ) + .file( + "foo/Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.1" + "#, + ) + .file("foo/src/lib.rs", "") + .build(); + + p.cargo("build") + .with_stderr( + "[..] foo v0.0.1 ([..])\n\ + [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n", + ) + .run(); +} + +#[cargo_test] fn cargo_build_empty_target() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) @@ -3701,6 +3818,42 @@ } #[cargo_test] +// NOTE: we don't have `/usr/bin/env` on Windows. 
+#[cfg(not(windows))] +fn rustc_workspace_wrapper() { + let p = project().file("src/lib.rs", "").build(); + p.cargo("build -v -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", "/usr/bin/env") + .masquerade_as_nightly_cargo() + .with_stderr_contains("[RUNNING] `/usr/bin/env rustc --crate-name foo [..]") + .run(); +} + +#[cargo_test] +#[cfg(not(windows))] +fn rustc_workspace_wrapper_relative() { + let p = project().file("src/lib.rs", "").build(); + p.cargo("build -v -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", "./sccache") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]/foo/./sccache rustc[..]") + .run(); +} + +#[cargo_test] +#[cfg(not(windows))] +fn rustc_workspace_wrapper_from_path() { + let p = project().file("src/lib.rs", "").build(); + p.cargo("build -v -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", "wannabe_sccache") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]`wannabe_sccache rustc [..]") + .run(); +} + +#[cargo_test] fn cdylib_not_lifted() { let p = project() .file( @@ -3721,7 +3874,11 @@ p.cargo("build").run(); let files = if cfg!(windows) { - vec!["foo.dll.lib", "foo.dll.exp", "foo.dll"] + if cfg!(target_env = "msvc") { + vec!["foo.dll.lib", "foo.dll.exp", "foo.dll"] + } else { + vec!["libfoo.dll.a", "foo.dll"] + } } else if cfg!(target_os = "macos") { vec!["libfoo.dylib"] } else { @@ -3755,7 +3912,11 @@ p.cargo("build").run(); let files = if cfg!(windows) { - vec!["foo_bar.dll.lib", "foo_bar.dll"] + if cfg!(target_env = "msvc") { + vec!["foo_bar.dll.lib", "foo_bar.dll"] + } else { + vec!["foo_bar.dll", "libfoo_bar.dll.a"] + } } else if cfg!(target_os = "macos") { vec!["libfoo_bar.dylib"] } else { @@ -4091,7 +4252,7 @@ assert!(p.target_debug_dir().join("foo.dSYM").is_dir()); assert!(p.target_debug_dir().join("b.dSYM").is_dir()); assert!(p.target_debug_dir().join("b.dSYM").is_symlink()); - assert!(p.target_debug_dir().join("examples/c.dSYM").is_symlink()); + assert!(p.target_debug_dir().join("examples/c.dSYM").is_dir()); assert!(!p.target_debug_dir().join("c.dSYM").exists()); assert!(!p.target_debug_dir().join("d.dSYM").exists()); } @@ -4130,6 +4291,7 @@ let p = project() .file("src/main.rs", "fn main() { panic!(); }") .file("src/bin/b.rs", "fn main() { panic!(); }") + .file("src/bin/foo-bar.rs", "fn main() { panic!(); }") .file("examples/c.rs", "fn main() { panic!(); }") .file("tests/d.rs", "fn main() { panic!(); }") .build(); @@ -4138,6 +4300,8 @@ assert!(p.target_debug_dir().join("foo.pdb").is_file()); assert!(p.target_debug_dir().join("b.pdb").is_file()); assert!(p.target_debug_dir().join("examples/c.pdb").exists()); + assert!(p.target_debug_dir().join("foo-bar.exe").is_file()); + assert!(p.target_debug_dir().join("foo_bar.pdb").is_file()); assert!(!p.target_debug_dir().join("c.pdb").exists()); assert!(!p.target_debug_dir().join("d.pdb").exists()); } @@ -4169,15 +4333,15 @@ p.cargo("build -v --test=t1") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 [..]", + --emit=[..]link[..]-C debuginfo=2 [..]", ) .with_stderr_contains( - "[RUNNING] `rustc --crate-name t1 tests/t1.rs [..]--emit=[..]link \ + "[RUNNING] `rustc --crate-name t1 tests/t1.rs [..]--emit=[..]link[..]\ -C debuginfo=2 [..]", ) .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ - --emit=[..]link -C debuginfo=2 [..]", + --emit=[..]link[..]-C debuginfo=2 [..]", ) .run(); @@ -4186,16 +4350,16 @@ p.cargo("build 
-v --bench=b1") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 [..]", + --emit=[..]link[..]-C debuginfo=2 [..]", ) .with_stderr_contains( - "[RUNNING] `rustc --crate-name b1 benches/b1.rs [..]--emit=[..]link \ + "[RUNNING] `rustc --crate-name b1 benches/b1.rs [..]--emit=[..]link[..]\ -C debuginfo=2 [..]", ) .with_stderr_does_not_contain("opt-level") .with_stderr_contains( "[RUNNING] `rustc --crate-name foo src/main.rs [..]--crate-type bin \ - --emit=[..]link -C debuginfo=2 [..]", + --emit=[..]link[..]-C debuginfo=2 [..]", ) .run(); } @@ -4216,7 +4380,7 @@ ) // Unit tests. .with_stderr_does_not_contain( - "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link \ + "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); @@ -4233,7 +4397,7 @@ ) // Unit tests. .with_stderr_contains( - "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link \ + "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); @@ -4250,7 +4414,7 @@ ) // Unit tests. .with_stderr_contains( - "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link \ + "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); @@ -4690,7 +4854,7 @@ "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` @@ -4759,3 +4923,183 @@ .run(); p.process(&p.bin("foo")).run(); } + +#[cargo_test] +fn close_output() { + // What happens when stdout or stderr is closed during a build. + + // Server to know when rustc has spawned. + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [lib] + proc-macro = true + + [[bin]] + name = "foobar" + "#, + ) + .file( + "src/lib.rs", + &r#" + use proc_macro::TokenStream; + use std::io::Read; + + #[proc_macro] + pub fn repro(_input: TokenStream) -> TokenStream { + println!("hello stdout!"); + eprintln!("hello stderr!"); + // Tell the test we have started. + let mut socket = std::net::TcpStream::connect("__ADDR__").unwrap(); + // Wait for the test to tell us to start printing. + let mut buf = [0]; + drop(socket.read_exact(&mut buf)); + let use_stderr = std::env::var("__CARGO_REPRO_STDERR").is_ok(); + for i in 0..10000 { + if use_stderr { + eprintln!("{}", i); + } else { + println!("{}", i); + } + } + TokenStream::new() + } + "# + .replace("__ADDR__", &addr.to_string()), + ) + .file( + "src/bin/foobar.rs", + r#" + foo::repro!(); + + fn main() {} + "#, + ) + .build(); + + // The `stderr` flag here indicates if this should forcefully close stderr or stdout. + let spawn = |stderr: bool| { + let mut cmd = p.cargo("build").build_command(); + cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + if stderr { + cmd.env("__CARGO_REPRO_STDERR", "1"); + } + let mut child = cmd.spawn().unwrap(); + // Wait for proc macro to start. + let pm_conn = listener.accept().unwrap().0; + // Close stderr or stdout. + if stderr { + drop(child.stderr.take()); + } else { + drop(child.stdout.take()); + } + // Tell the proc-macro to continue; + drop(pm_conn); + // Read the output from the other channel. 
+ let out: &mut dyn Read = if stderr { + child.stdout.as_mut().unwrap() + } else { + child.stderr.as_mut().unwrap() + }; + let mut result = String::new(); + out.read_to_string(&mut result).unwrap(); + let status = child.wait().unwrap(); + assert!(!status.success()); + result + }; + + let stderr = spawn(false); + assert!( + lines_match( + "\ +[COMPILING] foo [..] +hello stderr! +[ERROR] [..] +[WARNING] build failed, waiting for other jobs to finish... +[ERROR] build failed +", + &stderr, + ), + "lines differ:\n{}", + stderr + ); + + // Try again with stderr. + p.build_dir().rm_rf(); + let stdout = spawn(true); + assert!( + lines_match("hello stdout!\n", &stdout), + "lines differ:\n{}", + stdout + ); +} + +use cargo_test_support::registry::Dependency; + +#[cargo_test] +fn reduced_reproduction_8249() { + // https://github.com/rust-lang/cargo/issues/8249 + Package::new("a-src", "0.1.0").links("a").publish(); + Package::new("a-src", "0.2.0").links("a").publish(); + + Package::new("b", "0.1.0") + .add_dep(Dependency::new("a-src", "0.1").optional(true)) + .publish(); + Package::new("b", "0.2.0") + .add_dep(Dependency::new("a-src", "0.2").optional(true)) + .publish(); + + Package::new("c", "1.0.0") + .add_dep(&Dependency::new("b", "0.1.0")) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + b = { version = "*", features = ["a-src"] } + a-src = "*" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("generate-lockfile").run(); + cargo::util::paths::append(&p.root().join("Cargo.toml"), b"c = \"*\"").unwrap(); + p.cargo("check").run(); + p.cargo("check").run(); +} + +#[cargo_test] +fn target_directory_backup_exclusion() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + // Newly created target/ should have CACHEDIR.TAG inside... + p.cargo("build").run(); + let cachedir_tag = p.build_dir().join("CACHEDIR.TAG"); + assert!(cachedir_tag.is_file()); + assert!(fs::read_to_string(&cachedir_tag) + .unwrap() + .starts_with("Signature: 8a477f597d28d172789f06886806bc55")); + // ...but if target/ already exists CACHEDIR.TAG should not be created in it. + fs::remove_file(&cachedir_tag).unwrap(); + p.cargo("build").run(); + assert!(!&cachedir_tag.is_file()); +} diff -Nru cargo-0.44.1/tests/testsuite/build_script_env.rs cargo-0.47.0/tests/testsuite/build_script_env.rs --- cargo-0.44.1/tests/testsuite/build_script_env.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/build_script_env.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,5 @@ //! Tests for build.rs rerun-if-env-changed. -use std::fs::File; - use cargo_test_support::project; use cargo_test_support::sleep_ms; @@ -97,7 +95,7 @@ .with_stderr("[FINISHED] [..]") .run(); sleep_ms(1000); - File::create(p.root().join("foo")).unwrap(); + p.change_file("foo", ""); p.cargo("build") .env("FOO", "bar") .with_stderr( diff -Nru cargo-0.44.1/tests/testsuite/build_script.rs cargo-0.47.0/tests/testsuite/build_script.rs --- cargo-0.44.1/tests/testsuite/build_script.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/build_script.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,9 +1,8 @@ //! Tests for build.rs scripts. 
use std::env; -use std::fs::{self, File}; +use std::fs; use std::io; -use std::io::prelude::*; use std::thread; use cargo::util::paths::remove_dir_all; @@ -85,7 +84,6 @@ use std::env; use std::io::prelude::*; use std::path::Path; - use std::fs; fn main() {{ let _target = env::var("TARGET").unwrap(); @@ -103,7 +101,7 @@ let out = env::var("OUT_DIR").unwrap(); assert!(out.starts_with(r"{0}")); - assert!(fs::metadata(&out).map(|m| m.is_dir()).unwrap_or(false)); + assert!(Path::new(&out).is_dir()); let _host = env::var("HOST").unwrap(); @@ -696,7 +694,7 @@ p.cargo("build -v").run(); p.root().move_into_the_past(); - File::create(&p.root().join("some-new-file")).unwrap(); + p.change_file("some-new-file", ""); p.root().move_into_the_past(); p.cargo("build -v") @@ -774,7 +772,7 @@ p.cargo("build -v").run(); p.root().move_into_the_past(); - File::create(&p.root().join("some-new-file")).unwrap(); + p.change_file("some-new-file", ""); p.root().move_into_the_past(); p.cargo("build -v").run(); @@ -801,7 +799,7 @@ p.cargo("build -v").run(); p.root().move_into_the_past(); - File::create(&p.root().join("src/lib.rs")).unwrap(); + p.change_file("src/lib.rs", ""); p.root().move_into_the_past(); println!("test"); @@ -831,10 +829,7 @@ ) .run(); - File::create(&p.root().join("src/main.rs")) - .unwrap() - .write_all(b"fn main() {}") - .unwrap(); + p.change_file("src/main.rs", "fn main() {}"); println!("run"); p.cargo("run") .with_stderr( @@ -1139,19 +1134,19 @@ [RUNNING] `rustc [..] a/build.rs [..] --extern b=[..]` [RUNNING] `[..]/a-[..]/build-script-build` [RUNNING] `rustc --crate-name a [..]lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..]target/debug/deps \ -L [..]target/debug/deps` [COMPILING] foo v0.5.0 ([CWD]) [RUNNING] `rustc --crate-name build_script_build build.rs [..]--crate-type bin \ - --emit=[..]link \ + --emit=[..]link[..]\ -C debuginfo=2 -C metadata=[..] --out-dir [..] \ -L [..]target/debug/deps \ --extern a=[..]liba[..].rlib` [RUNNING] `[..]/foo-[..]/build-script-build` [RUNNING] `rustc --crate-name foo [..]lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L [..]target/debug/deps` @@ -1191,31 +1186,54 @@ // Make the file p.cargo("build -v").run(); - p.root().move_into_the_past(); // Change to asserting that it's there - File::create(&p.root().join("build.rs")) - .unwrap() - .write_all( - br#" - use std::env; - use std::old_io::File; - fn main() { - let out = env::var("OUT_DIR").unwrap(); - File::open(&Path::new(&out).join("foo")).unwrap(); - } - "#, + p.change_file( + "build.rs", + r#" + use std::env; + use std::fs::File; + use std::path::Path; + fn main() { + let out = env::var("OUT_DIR").unwrap(); + File::open(&Path::new(&out).join("foo")).unwrap(); + } + "#, + ); + p.cargo("build -v") + .with_stderr( + "\ +[COMPILING] foo [..] +[RUNNING] `rustc --crate-name build_script_build [..] +[RUNNING] `[..]/build-script-build` +[RUNNING] `rustc --crate-name foo [..] +[FINISHED] [..] +", ) - .unwrap(); - p.root().move_into_the_past(); - p.cargo("build -v").run(); + .run(); // Run a fresh build where file should be preserved - p.cargo("build -v").run(); + p.cargo("build -v") + .with_stderr( + "\ +[FRESH] foo [..] +[FINISHED] [..] +", + ) + .run(); // One last time to make sure it's still there. 
- File::create(&p.root().join("foo")).unwrap(); - p.cargo("build -v").run(); + p.change_file("foo", ""); + p.cargo("build -v") + .with_stderr( + "\ +[COMPILING] foo [..] +[RUNNING] `[..]build-script-build` +[RUNNING] `rustc --crate-name foo [..] +[FINISHED] [..] +", + ) + .run(); } #[cargo_test] @@ -1322,18 +1340,18 @@ "build.rs", r#" use std::env; - use std::fs::File; - use std::io::prelude::*; + use std::fs; use std::path::PathBuf; fn main() { let dst = PathBuf::from(env::var("OUT_DIR").unwrap()); - let mut f = File::create(&dst.join("hello.rs")).unwrap(); - f.write_all(b" + fs::write(dst.join("hello.rs"), + " pub fn message() -> &'static str { \"Hello, World!\" } - ").unwrap(); + ") + .unwrap(); } "#, ) @@ -1537,15 +1555,12 @@ "build.rs", r#" use std::env; - use std::io::prelude::*; - use std::fs::File; + use std::fs; use std::path::PathBuf; fn main() { let out = PathBuf::from(env::var("OUT_DIR").unwrap()); - File::create(out.join("foo.rs")).unwrap().write_all(b" - fn foo() -> i32 { 1 } - ").unwrap(); + fs::write(out.join("foo.rs"), "fn foo() -> i32 { 1 }").unwrap(); } "#, ) @@ -1648,7 +1663,7 @@ let src = root.join(&file); let dst = out_dir.join(&file); fs::copy(src, dst).unwrap(); - if cfg!(windows) { + if cfg!(target_env = "msvc") { fs::copy(root.join("builder.dll.lib"), out_dir.join("builder.dll.lib")).unwrap(); } @@ -2398,19 +2413,16 @@ ) .run(); - File::create(p.root().join(".cargo/config")) - .unwrap() - .write_all( - format!( - " - [target.{}.foo] - rustc-link-search = [\"native=bar\"] - ", - target - ) - .as_bytes(), - ) - .unwrap(); + p.change_file( + ".cargo/config", + &format!( + " + [target.{}.foo] + rustc-link-search = [\"native=bar\"] + ", + target + ), + ); p.cargo("build -v") .with_stderr( @@ -2462,19 +2474,16 @@ ) .run(); - File::create(p.root().join(".cargo/config")) - .unwrap() - .write_all( - format!( - " - [target.{}.foo] - rustc-link-search = [\"native=bar\"] - ", - target - ) - .as_bytes(), - ) - .unwrap(); + p.change_file( + ".cargo/config", + &format!( + " + [target.{}.foo] + rustc-link-search = [\"native=bar\"] + ", + target + ), + ); p.cargo("build -v") .with_stderr( @@ -2635,8 +2644,8 @@ .run(); sleep_ms(1000); - File::create(p.root().join("foo")).unwrap(); - File::create(p.root().join("bar")).unwrap(); + p.change_file("foo", ""); + p.change_file("bar", ""); sleep_ms(1000); // make sure the to-be-created outfile has a timestamp distinct from the infiles // now the exist, so run once, catch the mtime, then shouldn't run again @@ -2666,7 +2675,7 @@ // random other files do not affect freshness println!("run baz"); - File::create(p.root().join("baz")).unwrap(); + p.change_file("baz", ""); p.cargo("build -v") .with_stderr( "\ @@ -2678,7 +2687,7 @@ // but changing dependent files does println!("run foo change"); - File::create(p.root().join("foo")).unwrap(); + p.change_file("foo", ""); p.cargo("build -v") .with_stderr( "\ @@ -2981,6 +2990,39 @@ } #[cargo_test] +fn warnings_emitted_when_build_script_panics() { + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] + build = "build.rs" + "#, + ) + .file("src/lib.rs", "") + .file( + "build.rs", + r#" + fn main() { + println!("cargo:warning=foo"); + println!("cargo:warning=bar"); + panic!(); + } + "#, + ) + .build(); + + p.cargo("build") + .with_status(101) + .with_stdout("") + .with_stderr_contains("warning: foo\nwarning: bar") + .run(); +} + +#[cargo_test] fn warnings_hidden_for_upstream() { Package::new("bar", "0.1.0") .file( @@ -3274,19 +3316,17 @@ 
"build.rs", r#" use std::env; - use std::fs::File; - use std::io::Write; + use std::fs; use std::path::Path; fn main() { let out_dir = env::var_os("OUT_DIR").unwrap(); - let out_dir = Path::new(&out_dir).join("output"); - let mut f = File::create(&out_dir).unwrap(); + let output = Path::new(&out_dir).join("output"); if env::var_os("CARGO_FEATURE_FOO").is_some() { - f.write_all(b"foo").unwrap(); + fs::write(output, "foo").unwrap(); } else { - f.write_all(b"bar").unwrap(); + fs::write(output, "bar").unwrap(); } } "#, @@ -3937,7 +3977,9 @@ fn build_script_scan_eacces() { // build.rs causes a scan of the whole project, which can be a problem if // a directory is not accessible. + use cargo_test_support::git; use std::os::unix::fs::PermissionsExt; + let p = project() .file("src/lib.rs", "") .file("build.rs", "fn main() {}") @@ -3945,12 +3987,21 @@ .build(); let path = p.root().join("secrets"); fs::set_permissions(&path, fs::Permissions::from_mode(0)).unwrap(); - // "Caused by" is a string from libc such as the following: + // The last "Caused by" is a string from libc such as the following: // Permission denied (os error 13) p.cargo("build") .with_stderr( "\ -[ERROR] cannot read \"[..]/foo/secrets\" +[ERROR] failed to determine package fingerprint for build script for foo v0.0.1 ([..]/foo) + +Caused by: + failed to determine the most recently modified file in [..]/foo + +Caused by: + failed to determine list of files in [..]/foo + +Caused by: + cannot read \"[..]/foo/secrets\" Caused by: [..] @@ -3958,5 +4009,28 @@ ) .with_status(101) .run(); + + // Try `package.exclude` to skip a directory. + p.change_file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.1" + exclude = ["secrets"] + "#, + ); + p.cargo("build").run(); + + // Try with git. This succeeds because the git status walker ignores + // directories it can't access. + p.change_file("Cargo.toml", &basic_manifest("foo", "0.0.1")); + p.build_dir().rm_rf(); + let repo = git::init(&p.root()); + git::add(&repo); + git::commit(&repo); + p.cargo("build").run(); + + // Restore permissions so that the directory can be deleted. fs::set_permissions(&path, fs::Permissions::from_mode(0o755)).unwrap(); } diff -Nru cargo-0.44.1/tests/testsuite/cache_messages.rs cargo-0.47.0/tests/testsuite/cache_messages.rs --- cargo-0.44.1/tests/testsuite/cache_messages.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/cache_messages.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,8 +1,7 @@ //! Tests for caching compiler diagnostics. use cargo_test_support::{ - basic_manifest, command_is_available, is_coarse_mtime, process, project, registry::Package, - sleep_ms, + basic_manifest, is_coarse_mtime, process, project, registry::Package, sleep_ms, }; use std::path::Path; @@ -195,7 +194,7 @@ // Fill the cache. p.cargo("check").with_stderr_contains("[..]asdf[..]").run(); let cpath = p - .glob("target/debug/.fingerprint/foo-*/output") + .glob("target/debug/.fingerprint/foo-*/output-*") .next() .unwrap() .unwrap(); @@ -216,7 +215,10 @@ ", ) .run(); - assert_eq!(p.glob("target/debug/.fingerprint/foo-*/output").count(), 0); + assert_eq!( + p.glob("target/debug/.fingerprint/foo-*/output-*").count(), + 0 + ); // And again, check the cache is correct. 
p.cargo("check") @@ -254,7 +256,10 @@ let rustdoc_stderr = as_str(&rustdoc_output.stderr); assert!(rustdoc_stderr.contains("private")); assert!(rustdoc_stderr.contains("\x1b[")); - assert_eq!(p.glob("target/debug/.fingerprint/foo-*/output").count(), 1); + assert_eq!( + p.glob("target/debug/.fingerprint/foo-*/output-*").count(), + 1 + ); // Check the cached output. let rustdoc_output = p @@ -275,56 +280,6 @@ } #[cargo_test] -fn clippy() { - if !command_is_available("clippy-driver") { - return; - } - - // Caching clippy output. - // This is just a random clippy lint (assertions_on_constants) that - // hopefully won't change much in the future. - let p = project() - .file( - "src/lib.rs", - "pub fn f() { assert!(true); }\n\ - fn unused_func() {}", - ) - .build(); - - p.cargo("clippy-preview -Zunstable-options -v") - .masquerade_as_nightly_cargo() - .with_stderr_contains("[RUNNING] `clippy[..]") - .with_stderr_contains("[..]assert!(true)[..]") - .run(); - - // `check` should be separate from clippy. - p.cargo("check -v") - .with_stderr_contains( - "\ -[CHECKING] foo [..] -[RUNNING] `rustc[..] -[WARNING] [..]unused_func[..] -", - ) - .with_stderr_does_not_contain("[..]assert!(true)[..]") - .run(); - - // Again, reading from the cache. - p.cargo("clippy-preview -Zunstable-options -v") - .masquerade_as_nightly_cargo() - .with_stderr_contains("[FRESH] foo [..]") - .with_stderr_contains("[..]assert!(true)[..]") - .run(); - - // And `check` should also be fresh, reading from cache. - p.cargo("check -v") - .with_stderr_contains("[FRESH] foo [..]") - .with_stderr_contains("[WARNING] [..]unused_func[..]") - .with_stderr_does_not_contain("[..]assert!(true)[..]") - .run(); -} - -#[cargo_test] fn very_verbose() { // Handle cap-lints in dependencies. Package::new("bar", "1.0.0") @@ -382,14 +337,23 @@ p.cargo("build").run(); - assert_eq!(p.glob("target/debug/.fingerprint/foo-*/output").count(), 0); - assert_eq!(p.glob("target/debug/.fingerprint/dep-*/output").count(), 0); + assert_eq!( + p.glob("target/debug/.fingerprint/foo-*/output-*").count(), + 0 + ); + assert_eq!( + p.glob("target/debug/.fingerprint/dep-*/output-*").count(), + 0 + ); if is_coarse_mtime() { sleep_ms(1000); } p.change_file("src/lib.rs", "fn unused() {}"); p.cargo("build").run(); - assert_eq!(p.glob("target/debug/.fingerprint/foo-*/output").count(), 1); + assert_eq!( + p.glob("target/debug/.fingerprint/foo-*/output-*").count(), + 1 + ); } #[cargo_test] @@ -437,3 +401,128 @@ ) .run(); } + +#[cargo_test] +fn caching_large_output() { + // Handles large number of messages. + // This is an arbitrary amount that is greater than the 100 used in + // job_queue. This is here to check for deadlocks or any other problems. 
+ const COUNT: usize = 250; + let rustc = project() + .at("rustc") + .file("Cargo.toml", &basic_manifest("rustc_alt", "1.0.0")) + .file( + "src/main.rs", + &format!( + r#" + fn main() {{ + for i in 0..{} {{ + eprintln!("{{{{\"message\": \"test message {{}}\", \"level\": \"warning\", \ + \"spans\": [], \"children\": [], \"rendered\": \"test message {{}}\"}}}}", + i, i); + }} + let r = std::process::Command::new("rustc") + .args(std::env::args_os().skip(1)) + .status(); + std::process::exit(r.unwrap().code().unwrap_or(2)); + }} + "#, + COUNT + ), + ) + .build(); + + let mut expected = String::new(); + for i in 0..COUNT { + expected.push_str(&format!("test message {}\n", i)); + } + + rustc.cargo("build").run(); + let p = project().file("src/lib.rs", "").build(); + p.cargo("check") + .env("RUSTC", rustc.bin("rustc_alt")) + .with_stderr(&format!( + "\ +[CHECKING] foo [..] +{}[FINISHED] dev [..] +", + expected + )) + .run(); + + p.cargo("check") + .env("RUSTC", rustc.bin("rustc_alt")) + .with_stderr(&format!( + "\ +{}[FINISHED] dev [..] +", + expected + )) + .run(); +} + +#[cargo_test] +fn rustc_workspace_wrapper() { + use cargo_test_support::paths; + + let p = project() + .file( + "src/lib.rs", + "pub fn f() { assert!(true); }\n\ + fn unused_func() {}", + ) + .build(); + + p.cargo("check -Zunstable-options -v") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name foo src/lib.rs [..]") + .run(); + + // Check without a wrapper should rebuild + p.cargo("check -v") + .with_stderr_contains( + "\ +[CHECKING] foo [..] +[RUNNING] `rustc[..] +[WARNING] [..]unused_func[..] +", + ) + .with_stdout_does_not_contain("WRAPPER CALLED: rustc --crate-name foo src/lib.rs [..]") + .run(); + + // Again, reading from the cache. + p.cargo("check -Zunstable-options -v") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("[FRESH] foo [..]") + .with_stdout_does_not_contain("WRAPPER CALLED: rustc --crate-name foo src/lib.rs [..]") + .run(); + + // And `check` should also be fresh, reading from cache. + p.cargo("check -v") + .with_stderr_contains("[FRESH] foo [..]") + .with_stderr_contains("[WARNING] [..]unused_func[..]") + .with_stdout_does_not_contain("WRAPPER CALLED: rustc --crate-name foo src/lib.rs [..]") + .run(); +} + +#[cargo_test] +fn wacky_hashless_fingerprint() { + // On Windows, executables don't have hashes. This checks for a bad + // assumption that caused bad caching. + let p = project() + .file("src/bin/a.rs", "fn main() { let unused = 1; }") + .file("src/bin/b.rs", "fn main() {}") + .build(); + p.cargo("build --bin b") + .with_stderr_does_not_contain("[..]unused[..]") + .run(); + p.cargo("build --bin a") + .with_stderr_contains("[..]unused[..]") + .run(); + // This should not pick up the cache from `a`. 
+ p.cargo("build --bin b") + .with_stderr_does_not_contain("[..]unused[..]") + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/cargo_command.rs cargo-0.47.0/tests/testsuite/cargo_command.rs --- cargo-0.44.1/tests/testsuite/cargo_command.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/cargo_command.rs 2020-07-17 20:39:39.000000000 +0000 @@ -2,8 +2,9 @@ use std::env; use std::fs::{self, File}; -use std::io::prelude::*; +use std::io::Read; use std::path::{Path, PathBuf}; +use std::process::Stdio; use std::str; use cargo_test_support::cargo_process; @@ -174,15 +175,14 @@ let root = paths::root(); let my_home = root.join("my_home"); fs::create_dir(&my_home).unwrap(); - File::create(&my_home.join("config")) - .unwrap() - .write_all( - br#" - [alias] - myalias = "build" - "#, - ) - .unwrap(); + fs::write( + &my_home.join("config"), + r#" + [alias] + myalias = "build" + "#, + ) + .unwrap(); cargo_process("myalais") .env("CARGO_HOME", &my_home) @@ -239,17 +239,16 @@ let root = paths::root(); let my_home = root.join("my_home"); fs::create_dir(&my_home).unwrap(); - File::create(&my_home.join("config")) - .unwrap() - .write_all( - br#" - [cargo-new] - name = "foo" - email = "bar" - git = false - "#, - ) - .unwrap(); + fs::write( + &my_home.join("config"), + r#" + [cargo-new] + name = "foo" + email = "bar" + git = false + "#, + ) + .unwrap(); cargo_process("new foo") .env("USER", "foo") @@ -257,11 +256,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo "]"#)); } @@ -378,3 +373,24 @@ .with_stdout_contains(" -Z unstable-options -- Allow the usage of unstable options") .run(); } + +#[cargo_test] +fn closed_output_ok() { + // Checks that closed output doesn't cause an error. + let mut p = cargo_process("--list").build_command(); + p.stdout(Stdio::piped()).stderr(Stdio::piped()); + let mut child = p.spawn().unwrap(); + // Close stdout + drop(child.stdout.take()); + // Read stderr + let mut s = String::new(); + child + .stderr + .as_mut() + .unwrap() + .read_to_string(&mut s) + .unwrap(); + let status = child.wait().unwrap(); + assert!(status.success()); + assert!(s.is_empty(), s); +} diff -Nru cargo-0.44.1/tests/testsuite/cargo_features.rs cargo-0.47.0/tests/testsuite/cargo_features.rs --- cargo-0.44.1/tests/testsuite/cargo_features.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/cargo_features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -30,7 +30,7 @@ Caused by: feature `test-dummy-unstable` is required -consider adding `cargo-features = [\"test-dummy-unstable\"]` to the manifest + consider adding `cargo-features = [\"test-dummy-unstable\"]` to the manifest ", ) .run(); @@ -47,9 +47,9 @@ Caused by: feature `test-dummy-unstable` is required -this Cargo does not support nightly features, but if you -switch to nightly channel you can add -`cargo-features = [\"test-dummy-unstable\"]` to enable this feature + this Cargo does not support nightly features, but if you + switch to nightly channel you can add + `cargo-features = [\"test-dummy-unstable\"]` to enable this feature ", ) .run(); @@ -148,7 +148,7 @@ Caused by: the cargo feature `test-dummy-unstable` requires a nightly version of Cargo, \ but this is the `stable` channel -See [..] + See [..] 
", ) .run(); @@ -213,7 +213,7 @@ Caused by: the cargo feature `test-dummy-unstable` requires a nightly version of Cargo, \ but this is the `stable` channel -See [..] + See [..] ", ) .run(); @@ -255,7 +255,7 @@ Caused by: the cargo feature `test-dummy-unstable` requires a nightly version of Cargo, \ but this is the `stable` channel -See [..] + See [..] ", ) .run(); @@ -323,8 +323,7 @@ ) .file("src/lib.rs", "") .build(); - p.cargo("publish --index") - .arg(registry::registry_url().to_string()) + p.cargo("publish --token sekrit") .masquerade_as_nightly_cargo() .run(); } diff -Nru cargo-0.44.1/tests/testsuite/cargo_targets.rs cargo-0.47.0/tests/testsuite/cargo_targets.rs --- cargo-0.44.1/tests/testsuite/cargo_targets.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/cargo_targets.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,39 @@ +//! Tests specifically related to target handling (lib, bins, examples, tests, benches). + +use cargo_test_support::project; + +#[cargo_test] +fn reserved_windows_target_name() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [[bin]] + name = "con" + path = "src/main.rs" + "#, + ) + .file("src/main.rs", "fn main() {}") + .build(); + + if cfg!(windows) { + p.cargo("check") + .with_stderr( + "\ +[WARNING] binary target `con` is a reserved Windows filename, \ +this target will not work on Windows platforms +[CHECKING] foo[..] +[FINISHED][..] +", + ) + .run(); + } else { + p.cargo("check") + .with_stderr("[CHECKING] foo[..]\n[FINISHED][..]") + .run(); + } +} diff -Nru cargo-0.44.1/tests/testsuite/check.rs cargo-0.47.0/tests/testsuite/check.rs --- cargo-0.44.1/tests/testsuite/check.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/check.rs 2020-07-17 20:39:39.000000000 +0000 @@ -5,7 +5,7 @@ use cargo_test_support::install::exe; use cargo_test_support::paths::CargoPathExt; use cargo_test_support::registry::Package; -use cargo_test_support::{basic_manifest, is_nightly, project}; +use cargo_test_support::{basic_manifest, project}; #[cargo_test] fn check_success() { @@ -683,11 +683,6 @@ #[cargo_test] fn short_message_format() { - if !is_nightly() { - // This relies on a bug fix https://github.com/rust-lang/rust/pull/64753. - // This check may be removed once 1.40 is stable. - return; - } let foo = project() .file("src/lib.rs", "fn foo() { let _x: bool = 'a'; }") .build(); @@ -753,6 +748,15 @@ } #[cargo_test] +fn does_not_use_empty_rustc_workspace_wrapper() { + let p = project().file("src/lib.rs", "").build(); + p.cargo("check -Zunstable-options") + .masquerade_as_nightly_cargo() + .env("RUSTC_WORKSPACE_WRAPPER", "") + .run(); +} + +#[cargo_test] fn error_from_deep_recursion() -> Result<(), fmt::Error> { let mut big_macro = String::new(); writeln!(big_macro, "macro_rules! 
m {{")?; @@ -772,3 +776,124 @@ Ok(()) } + +#[cargo_test] +fn rustc_workspace_wrapper_affects_all_workspace_members() { + use cargo_test_support::paths; + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["bar", "baz"] + "#, + ) + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "pub fn bar() {}") + .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) + .file("baz/src/lib.rs", "pub fn baz() {}") + .build(); + + p.cargo("check -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name bar [..]") + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name baz [..]") + .run(); +} + +#[cargo_test] +fn rustc_workspace_wrapper_includes_path_deps() { + use cargo_test_support::paths; + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.1.0" + authors = [] + + [workspace] + members = ["bar"] + + [dependencies] + baz = { path = "baz" } + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "pub fn bar() {}") + .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) + .file("baz/src/lib.rs", "pub fn baz() {}") + .build(); + + p.cargo("check --workspace -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name foo [..]") + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name bar [..]") + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name baz [..]") + .run(); +} + +#[cargo_test] +fn rustc_workspace_wrapper_respects_primary_units() { + use cargo_test_support::paths; + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["bar", "baz"] + "#, + ) + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "pub fn bar() {}") + .file("baz/Cargo.toml", &basic_manifest("baz", "0.1.0")) + .file("baz/src/lib.rs", "pub fn baz() {}") + .build(); + + p.cargo("check -p bar -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name bar [..]") + .with_stdout_does_not_contain("WRAPPER CALLED: rustc --crate-name baz [..]") + .run(); +} + +#[cargo_test] +fn rustc_workspace_wrapper_excludes_published_deps() { + use cargo_test_support::paths; + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.1.0" + authors = [] + + [workspace] + members = ["bar"] + + [dependencies] + baz = "1.0.0" + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "pub fn bar() {}") + .build(); + + Package::new("baz", "1.0.0").publish(); + + p.cargo("check --workspace -v -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name foo [..]") + .with_stderr_contains("WRAPPER CALLED: rustc --crate-name bar [..]") + .with_stderr_contains("[CHECKING] baz [..]") + .with_stdout_does_not_contain("WRAPPER CALLED: rustc --crate-name baz [..]") + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/clean.rs cargo-0.47.0/tests/testsuite/clean.rs --- cargo-0.44.1/tests/testsuite/clean.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/clean.rs 2020-07-17 20:39:39.000000000 
+0000 @@ -1,9 +1,10 @@ //! Tests for the `cargo clean` command. -use std::env; - +use cargo_test_support::paths::CargoPathExt; use cargo_test_support::registry::Package; -use cargo_test_support::{basic_bin_manifest, basic_manifest, git, main_file, project}; +use cargo_test_support::{basic_bin_manifest, basic_manifest, git, main_file, project, rustc_host}; +use std::env; +use std::path::Path; #[cargo_test] fn cargo_clean_simple() { @@ -185,7 +186,7 @@ if env::var("FIRST").is_ok() { std::fs::File::create(out.join("out")).unwrap(); } else { - assert!(!std::fs::metadata(out.join("out")).is_ok()); + assert!(!out.join("out").exists()); } } "#, @@ -291,6 +292,7 @@ [REMOVING] [..] [REMOVING] [..] [REMOVING] [..] +[REMOVING] [..] ", ) .run(); @@ -319,3 +321,217 @@ assert!(!p.target_debug_dir().join("libfoo.rlib").exists()); assert!(!rmeta.exists()); } + +#[cargo_test] +fn package_cleans_all_the_things() { + // -p cleans everything + // Use dashes everywhere to make sure dash/underscore stuff is handled. + for crate_type in &["rlib", "dylib", "cdylib", "staticlib", "proc-macro"] { + // Try each crate type individually since the behavior changes when + // they are combined. + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "foo-bar" + version = "0.1.0" + + [lib] + crate-type = ["{}"] + "#, + crate_type + ), + ) + .file("src/lib.rs", "") + .build(); + p.cargo("build").run(); + p.cargo("clean -p foo-bar").run(); + assert_all_clean(&p.build_dir()); + } + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo-bar" + version = "0.1.0" + edition = "2018" + + [lib] + crate-type = ["rlib", "dylib", "staticlib"] + + [[example]] + name = "foo-ex-rlib" + crate-type = ["rlib"] + test = true + + [[example]] + name = "foo-ex-cdylib" + crate-type = ["cdylib"] + test = true + + [[example]] + name = "foo-ex-bin" + test = true + "#, + ) + .file("src/lib.rs", "") + .file("src/main.rs", "fn main() {}") + .file("src/bin/other-main.rs", "fn main() {}") + .file("examples/foo-ex-rlib.rs", "") + .file("examples/foo-ex-cdylib.rs", "") + .file("examples/foo-ex-bin.rs", "fn main() {}") + .file("tests/foo-test.rs", "") + .file("benches/foo-bench.rs", "") + .file("build.rs", "fn main() {}") + .build(); + + p.cargo("build --all-targets") + .env("CARGO_INCREMENTAL", "1") + .run(); + p.cargo("test --all-targets") + .env("CARGO_INCREMENTAL", "1") + .run(); + p.cargo("check --all-targets") + .env("CARGO_INCREMENTAL", "1") + .run(); + p.cargo("clean -p foo-bar").run(); + assert_all_clean(&p.build_dir()); + + // Try some targets. + p.cargo("build --all-targets --target") + .arg(rustc_host()) + .run(); + p.cargo("clean -p foo-bar --target").arg(rustc_host()).run(); + assert_all_clean(&p.build_dir()); +} + +// Ensures that all files for the package have been deleted. +fn assert_all_clean(build_dir: &Path) { + let walker = walkdir::WalkDir::new(build_dir).into_iter(); + for entry in walker.filter_entry(|e| { + let path = e.path(); + // This is a known limitation, clean can't differentiate between + // the different build scripts from different packages. 
+ !(path + .file_name() + .unwrap() + .to_str() + .unwrap() + .starts_with("build_script_build") + && path + .parent() + .unwrap() + .file_name() + .unwrap() + .to_str() + .unwrap() + == "incremental") + }) { + let entry = entry.unwrap(); + let path = entry.path(); + if let ".rustc_info.json" | ".cargo-lock" | "CACHEDIR.TAG" = + path.file_name().unwrap().to_str().unwrap() + { + continue; + } + if path.is_symlink() || path.is_file() { + panic!("{:?} was not cleaned", path); + } + } +} + +#[cargo_test] +fn clean_spec_multiple() { + // clean -p foo where foo matches multiple versions + Package::new("bar", "1.0.0").publish(); + Package::new("bar", "2.0.0").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar1 = {version="1.0", package="bar"} + bar2 = {version="2.0", package="bar"} + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build").run(); + p.cargo("clean -p bar:1.0.0") + .with_stderr( + "warning: version qualifier in `-p bar:1.0.0` is ignored, \ + cleaning all versions of `bar` found", + ) + .run(); + let mut walker = walkdir::WalkDir::new(p.build_dir()) + .into_iter() + .filter_map(|e| e.ok()) + .filter(|e| { + let n = e.file_name().to_str().unwrap(); + n.starts_with("bar") || n.starts_with("libbar") + }); + if let Some(e) = walker.next() { + panic!("{:?} was not cleaned", e.path()); + } +} + +#[cargo_test] +fn clean_spec_reserved() { + // Clean when a target (like a test) has a reserved name. In this case, + // make sure `clean -p` doesn't delete the reserved directory `build` when + // there is a test named `build`. + Package::new("bar", "1.0.0") + .file("src/lib.rs", "") + .file("build.rs", "fn main() {}") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "1.0" + "#, + ) + .file("src/lib.rs", "") + .file("tests/build.rs", "") + .build(); + + p.cargo("build --all-targets").run(); + assert!(p.target_debug_dir().join("build").is_dir()); + let build_test = p.glob("target/debug/deps/build-*").next().unwrap().unwrap(); + assert!(build_test.exists()); + // Tests are never "uplifted". + assert!(p.glob("target/debug/build-*").next().is_none()); + + p.cargo("clean -p foo").run(); + // Should not delete this. + assert!(p.target_debug_dir().join("build").is_dir()); + + // This should not rebuild bar. + p.cargo("build -v --all-targets") + .with_stderr( + "\ +[FRESH] bar v1.0.0 +[COMPILING] foo v0.1.0 [..] +[RUNNING] `rustc [..] +[RUNNING] `rustc [..] +[RUNNING] `rustc [..] +[FINISHED] [..] +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/clippy.rs cargo-0.47.0/tests/testsuite/clippy.rs --- cargo-0.44.1/tests/testsuite/clippy.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/clippy.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -//! Tests for the `cargo clippy` command. - -use cargo_test_support::{command_is_available, project, registry::Package}; - -#[cargo_test] -// Clippy should never be considered fresh. -fn clippy_force_rebuild() { - if !command_is_available("clippy-driver") { - return; - } - - Package::new("dep1", "0.1.0").publish(); - - // This is just a random clippy lint (assertions_on_constants) that - // hopefully won't change much in the future. 
- let p = project() - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - - [dependencies] - dep1 = "0.1" - "#, - ) - .file("src/lib.rs", "pub fn f() { assert!(true); }") - .build(); - - p.cargo("clippy-preview -Zunstable-options -v") - .masquerade_as_nightly_cargo() - .with_stderr_contains("[..]assert!(true)[..]") - .run(); - - // Make sure it runs again. - p.cargo("clippy-preview -Zunstable-options -v") - .masquerade_as_nightly_cargo() - .with_stderr_contains("[FRESH] dep1 v0.1.0") - .with_stderr_contains("[..]assert!(true)[..]") - .run(); -} - -#[cargo_test] -fn clippy_passes_args() { - if !command_is_available("clippy-driver") { - return; - } - - // This is just a random clippy lint (assertions_on_constants) that - // hopefully won't change much in the future. - let p = project() - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - - [dependencies] - "#, - ) - .file("src/lib.rs", "pub fn f() { assert!(true); }") - .build(); - - p.cargo("clippy-preview -Zunstable-options -v -- -Aclippy::assertions_on_constants") - .masquerade_as_nightly_cargo() - .with_stderr_does_not_contain("[..]assert!(true)[..]") - .run(); - - // Make sure it runs again. - p.cargo("clippy-preview -Zunstable-options -v") - .masquerade_as_nightly_cargo() - .with_stderr_contains("[..]assert!(true)[..]") - .run(); -} diff -Nru cargo-0.44.1/tests/testsuite/collisions.rs cargo-0.47.0/tests/testsuite/collisions.rs --- cargo-0.44.1/tests/testsuite/collisions.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/collisions.rs 2020-07-17 20:39:39.000000000 +0000 @@ -91,9 +91,9 @@ } #[cargo_test] -// --out-dir and examples are currently broken on MSVC. +// --out-dir and examples are currently broken on MSVC and apple. // See https://github.com/rust-lang/cargo/issues/7493 -#[cfg(not(target_env = "msvc"))] +#[cfg_attr(any(target_env = "msvc", target_vendor = "apple"), ignore)] fn collision_export() { // `--out-dir` combines some things which can cause conflicts. let p = project() diff -Nru cargo-0.44.1/tests/testsuite/concurrent.rs cargo-0.47.0/tests/testsuite/concurrent.rs --- cargo-0.44.1/tests/testsuite/concurrent.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/concurrent.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,6 @@ //! Tests for running multiple `cargo` processes at the same time. 
-use std::fs::{self, File}; -use std::io::Write; +use std::fs; use std::net::TcpListener; use std::process::Stdio; use std::sync::mpsc::channel; @@ -185,10 +184,7 @@ let repo = git2::Repository::open(&a.root()).unwrap(); git::tag(&repo, "tag1"); - File::create(a.root().join("src/lib.rs")) - .unwrap() - .write_all(b"pub fn tag2() {}") - .unwrap(); + a.change_file("src/lib.rs", "pub fn tag2() {}"); git::add(&repo); git::commit(&repo); git::tag(&repo, "tag2"); @@ -308,10 +304,7 @@ // Make a new commit on the master branch let repo = git2::Repository::open(&a.root()).unwrap(); - File::create(a.root().join("src/lib.rs")) - .unwrap() - .write_all(b"pub fn f2() {}") - .unwrap(); + a.change_file("src/lib.rs", "pub fn f2() {}"); git::add(&repo); git::commit(&repo); diff -Nru cargo-0.44.1/tests/testsuite/config_include.rs cargo-0.47.0/tests/testsuite/config_include.rs --- cargo-0.44.1/tests/testsuite/config_include.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/config_include.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,7 +3,7 @@ use super::config::{ assert_error, assert_match, read_output, write_config, write_config_at, ConfigBuilder, }; -use cargo_test_support::{paths, NO_SUCH_FILE_ERR_MSG}; +use cargo_test_support::{no_such_file_err_msg, paths}; use std::fs; #[cargo_test] @@ -92,7 +92,7 @@ Caused by: {}", - NO_SUCH_FILE_ERR_MSG + no_such_file_err_msg() ), ); } @@ -179,7 +179,7 @@ Caused by: {}", - NO_SUCH_FILE_ERR_MSG + no_such_file_err_msg() ), ); } diff -Nru cargo-0.44.1/tests/testsuite/config.rs cargo-0.47.0/tests/testsuite/config.rs --- cargo-0.44.1/tests/testsuite/config.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/config.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,9 @@ //! Tests for config settings. 
-use cargo::core::{enable_nightly_features, InternedString, Shell}; +use cargo::core::profiles::Strip; +use cargo::core::{enable_nightly_features, Shell}; use cargo::util::config::{self, Config, SslVersionConfig, StringList}; +use cargo::util::interning::InternedString; use cargo::util::toml::{self, VecStringOrBool as VSOB}; use cargo::CargoResult; use cargo_test_support::{normalized_lines_match, paths, project, t}; @@ -574,7 +576,7 @@ error in [..]/.cargo/config: could not load config key `profile.dev.opt-level` Caused by: - must be an integer, `z`, or `s`, but found: foo", + must be an integer, `z`, or `s`, but found the string: \"foo\"", ); let config = ConfigBuilder::new() @@ -587,7 +589,7 @@ error in environment variable `CARGO_PROFILE_DEV_OPT_LEVEL`: could not load config key `profile.dev.opt-level` Caused by: - must be an integer, `z`, or `s`, but found: asdf", + must be an integer, `z`, or `s`, but found the string: \"asdf\"", ); } @@ -1258,3 +1260,42 @@ "error in environment variable `CARGO_KEY3`: expected string, found integer", ); } + +#[cargo_test] +fn parse_enum() { + write_config( + "\ +[profile.release] +strip = 'debuginfo' +", + ); + + let config = new_config(); + + let p: toml::TomlProfile = config.get("profile.release").unwrap(); + let strip = p.strip.unwrap(); + assert_eq!(strip, Strip::DebugInfo); +} + +#[cargo_test] +fn parse_enum_fail() { + write_config( + "\ +[profile.release] +strip = 'invalid' +", + ); + + let config = new_config(); + + assert_error( + config + .get::("profile.release") + .unwrap_err(), + "\ +error in [..]/.cargo/config: could not load config key `profile.release.strip` + +Caused by: + unknown variant `invalid`, expected one of `debuginfo`, `none`, `symbols`", + ); +} diff -Nru cargo-0.44.1/tests/testsuite/cross_compile.rs cargo-0.47.0/tests/testsuite/cross_compile.rs --- cargo-0.44.1/tests/testsuite/cross_compile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/cross_compile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -193,7 +193,7 @@ "\ [COMPILING] foo v0.5.0 ([CWD]) [RUNNING] `rustc --crate-name foo src/foo.rs [..]--crate-type bin \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [CWD]/target/{target}/debug/deps \ --target {target} \ @@ -1067,3 +1067,52 @@ .with_stdout_contains_n("test foo ... ok", 2) .run(); } + +#[cargo_test] +fn doctest_xcompile_linker() { + if cross_compile::disabled() { + return; + } + if !is_nightly() { + // -Zdoctest-xcompile is unstable + return; + } + + let target = cross_compile::alternate(); + let p = project() + .file( + ".cargo/config", + &format!( + r#" + [target.{}] + linker = "my-linker-tool" + "#, + target + ), + ) + .file("Cargo.toml", &basic_manifest("foo", "0.1.0")) + .file( + "src/lib.rs", + r#" + /// ``` + /// assert_eq!(1, 1); + /// ``` + pub fn foo() {} + "#, + ) + .build(); + + // Fails because `my-linker-tool` doesn't actually exist. + p.cargo("test --doc -v -Zdoctest-xcompile --target") + .arg(&target) + .with_status(101) + .masquerade_as_nightly_cargo() + .with_stderr_contains(&format!( + "\ +[RUNNING] `rustdoc --crate-type lib --test [..]\ + --target {target} [..] -C linker=my-linker-tool[..] 
+", + target = target, + )) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/cross_publish.rs cargo-0.47.0/tests/testsuite/cross_publish.rs --- cargo-0.44.1/tests/testsuite/cross_publish.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/cross_publish.rs 2020-07-17 20:39:39.000000000 +0000 @@ -97,8 +97,7 @@ let target = cross_compile::alternate(); - p.cargo("publish --index") - .arg(registry::registry_url().to_string()) + p.cargo("publish --token sekrit") .arg("--target") .arg(&target) .with_stderr(&format!( diff -Nru cargo-0.44.1/tests/testsuite/custom_target.rs cargo-0.47.0/tests/testsuite/custom_target.rs --- cargo-0.44.1/tests/testsuite/custom_target.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/custom_target.rs 2020-07-17 20:39:39.000000000 +0000 @@ -51,6 +51,12 @@ p.cargo("build --lib --target custom-target.json -v").run(); p.cargo("build --lib --target src/../custom-target.json -v") .run(); + + // Ensure that the correct style of flag is passed to --target with doc tests. + p.cargo("test --doc --target src/../custom-target.json -v -Zdoctest-xcompile") + .masquerade_as_nightly_cargo() + .with_stderr_contains("[RUNNING] `rustdoc [..]--target [..]foo/custom-target.json[..]") + .run(); } #[cargo_test] diff -Nru cargo-0.44.1/tests/testsuite/death.rs cargo-0.47.0/tests/testsuite/death.rs --- cargo-0.44.1/tests/testsuite/death.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/death.rs 2020-07-17 20:39:39.000000000 +0000 @@ -8,52 +8,8 @@ use cargo_test_support::{project, slow_cpu_multiplier}; -#[cfg(unix)] -fn enabled() -> bool { - true -} - -// On Windows support for these tests is only enabled through the usage of job -// objects. Support for nested job objects, however, was added in recent-ish -// versions of Windows, so this test may not always be able to succeed. -// -// As a result, we try to add ourselves to a job object here -// can succeed or not. -#[cfg(windows)] -fn enabled() -> bool { - use winapi::um::{handleapi, jobapi, jobapi2, processthreadsapi}; - - unsafe { - // If we're not currently in a job, then we can definitely run these - // tests. - let me = processthreadsapi::GetCurrentProcess(); - let mut ret = 0; - let r = jobapi::IsProcessInJob(me, 0 as *mut _, &mut ret); - assert_ne!(r, 0); - if ret == ::winapi::shared::minwindef::FALSE { - return true; - } - - // If we are in a job, then we can run these tests if we can be added to - // a nested job (as we're going to create a nested job no matter what as - // part of these tests. - // - // If we can't be added to a nested job, then these tests will - // definitely fail, and there's not much we can do about that. 
- let job = jobapi2::CreateJobObjectW(0 as *mut _, 0 as *const _); - assert!(!job.is_null()); - let r = jobapi2::AssignProcessToJobObject(job, me); - handleapi::CloseHandle(job); - r != 0 - } -} - #[cargo_test] fn ctrl_c_kills_everyone() { - if !enabled() { - return; - } - let listener = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); @@ -132,7 +88,7 @@ } #[cfg(unix)] -fn ctrl_c(child: &mut Child) { +pub fn ctrl_c(child: &mut Child) { let r = unsafe { libc::kill(-(child.id() as i32), libc::SIGINT) }; if r < 0 { panic!("failed to kill: {}", io::Error::last_os_error()); @@ -140,6 +96,6 @@ } #[cfg(windows)] -fn ctrl_c(child: &mut Child) { +pub fn ctrl_c(child: &mut Child) { child.kill().unwrap(); } diff -Nru cargo-0.44.1/tests/testsuite/dep_info.rs cargo-0.47.0/tests/testsuite/dep_info.rs --- cargo-0.44.1/tests/testsuite/dep_info.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/dep_info.rs 2020-07-17 20:39:39.000000000 +0000 @@ -9,6 +9,7 @@ use filetime::FileTime; use std::fs; use std::path::Path; +use std::str; // Helper for testing dep-info files in the fingerprint dir. fn assert_deps(project: &Project, fingerprint: &str, test_cb: impl Fn(&Path, &[(u8, &str)])) { @@ -22,17 +23,38 @@ .unwrap_or_else(|| panic!("expected 1 dep-info file at {}, found 0", fingerprint)); assert!(files.next().is_none(), "expected only 1 dep-info file"); let dep_info = fs::read(&info_path).unwrap(); - let deps: Vec<(u8, &str)> = dep_info - .split(|&x| x == 0) - .filter(|x| !x.is_empty()) - .map(|p| { + let dep_info = &mut &dep_info[..]; + let deps = (0..read_usize(dep_info)) + .map(|_| { ( - p[0], - std::str::from_utf8(&p[1..]).expect("expected valid path"), + read_u8(dep_info), + str::from_utf8(read_bytes(dep_info)).unwrap(), ) }) - .collect(); + .collect::>(); test_cb(&info_path, &deps); + + fn read_usize(bytes: &mut &[u8]) -> usize { + let ret = &bytes[..4]; + *bytes = &bytes[4..]; + ((ret[0] as usize) << 0) + | ((ret[1] as usize) << 8) + | ((ret[2] as usize) << 16) + | ((ret[3] as usize) << 24) + } + + fn read_u8(bytes: &mut &[u8]) -> u8 { + let ret = bytes[0]; + *bytes = &bytes[1..]; + ret + } + + fn read_bytes<'a>(bytes: &mut &'a [u8]) -> &'a [u8] { + let n = read_usize(bytes) as usize; + let ret = &bytes[..n]; + *bytes = &bytes[n..]; + ret + } } fn assert_deps_contains(project: &Project, fingerprint: &str, expected: &[(u8, &str)]) { @@ -272,32 +294,32 @@ assert_deps_contains( &p, - "target/debug/.fingerprint/pm-*/dep-lib-pm-*", - &[(1, "src/lib.rs"), (2, "debug/deps/libpmdep-*.rlib")], + "target/debug/.fingerprint/pm-*/dep-lib-pm", + &[(0, "src/lib.rs"), (1, "debug/deps/libpmdep-*.rlib")], ); assert_deps_contains( &p, - &format!("target/{}/debug/.fingerprint/foo-*/dep-bin-foo*", host), + &format!("target/{}/debug/.fingerprint/foo-*/dep-bin-foo", host), &[ - (1, "src/main.rs"), + (0, "src/main.rs"), ( - 2, + 1, &format!( "debug/deps/{}pm-*.{}", paths::get_lib_prefix("proc-macro"), paths::get_lib_extension("proc-macro") ), ), - (2, &format!("{}/debug/deps/libbar-*.rlib", host)), - (2, &format!("{}/debug/deps/libregdep-*.rlib", host)), + (1, &format!("{}/debug/deps/libbar-*.rlib", host)), + (1, &format!("{}/debug/deps/libregdep-*.rlib", host)), ], ); assert_deps_contains( &p, - "target/debug/.fingerprint/foo-*/dep-build-script-build_script_build-*", - &[(1, "build.rs"), (2, "debug/deps/libbdep-*.rlib")], + "target/debug/.fingerprint/foo-*/dep-build-script-build-script-build", + &[(0, "build.rs"), (1, "debug/deps/libbdep-*.rlib")], ); // Make sure it 
stays fresh. @@ -400,32 +422,32 @@ assert_deps_contains( &p, - "target/debug/.fingerprint/pm-*/dep-lib-pm-*", - &[(1, "src/lib.rs"), (2, "debug/deps/libpmdep-*.rlib")], + "target/debug/.fingerprint/pm-*/dep-lib-pm", + &[(0, "src/lib.rs"), (1, "debug/deps/libpmdep-*.rlib")], ); assert_deps_contains( &p, - "target/debug/.fingerprint/foo-*/dep-bin-foo*", + "target/debug/.fingerprint/foo-*/dep-bin-foo", &[ - (1, "src/main.rs"), + (0, "src/main.rs"), ( - 2, + 1, &format!( "debug/deps/{}pm-*.{}", paths::get_lib_prefix("proc-macro"), paths::get_lib_extension("proc-macro") ), ), - (2, "debug/deps/libbar-*.rlib"), - (2, "debug/deps/libregdep-*.rlib"), + (1, "debug/deps/libbar-*.rlib"), + (1, "debug/deps/libregdep-*.rlib"), ], ); assert_deps_contains( &p, - "target/debug/.fingerprint/foo-*/dep-build-script-build_script_build-*", - &[(1, "build.rs"), (2, "debug/deps/libbdep-*.rlib")], + "target/debug/.fingerprint/foo-*/dep-build-script-build-script-build", + &[(0, "build.rs"), (1, "debug/deps/libbdep-*.rlib")], ); // Make sure it stays fresh. @@ -461,7 +483,7 @@ assert_deps( &p, - "target/debug/.fingerprint/regdep-*/dep-lib-regdep-*", + "target/debug/.fingerprint/regdep-*/dep-lib-regdep", |info_path, entries| { for (kind, path) in entries { if *kind == 1 { @@ -513,7 +535,7 @@ assert_deps_contains( &p, - "target/debug/.fingerprint/foo-*/dep-lib-foo-*", - &[(1, "src/lib.rs"), (2, "debug/deps/libregdep-*.rmeta")], + "target/debug/.fingerprint/foo-*/dep-lib-foo", + &[(0, "src/lib.rs"), (1, "debug/deps/libregdep-*.rmeta")], ); } diff -Nru cargo-0.44.1/tests/testsuite/directory.rs cargo-0.47.0/tests/testsuite/directory.rs --- cargo-0.44.1/tests/testsuite/directory.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/directory.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,8 +1,7 @@ //! Tests for directory sources. 
use std::collections::HashMap; -use std::fs::{self, File}; -use std::io::prelude::*; +use std::fs; use std::str; use serde::Serialize; @@ -16,8 +15,9 @@ fn setup() { let root = paths::root(); t!(fs::create_dir(&root.join(".cargo"))); - t!(t!(File::create(root.join(".cargo/config"))).write_all( - br#" + t!(fs::write( + root.join(".cargo/config"), + r#" [source.crates-io] replace-with = 'my-awesome-local-registry' @@ -192,9 +192,9 @@ Caused by: no matching package named `baz` found -location searched: registry `https://github.com/rust-lang/crates.io-index` -perhaps you meant: bar or foo -required by package `bar v0.1.0` + location searched: registry `https://github.com/rust-lang/crates.io-index` + perhaps you meant: bar or foo + required by package `bar v0.1.0` ", ) .run(); @@ -442,8 +442,10 @@ .file("src/lib.rs", "") .build(); - let mut f = t!(File::create(paths::root().join("index/bar/src/lib.rs"))); - t!(f.write_all(b"fn bar() -> u32 { 0 }")); + t!(fs::write( + paths::root().join("index/bar/src/lib.rs"), + "fn bar() -> u32 { 0 }" + )); let p = project() .file( @@ -576,24 +578,23 @@ p.cargo("build").run(); - let mut lock1 = String::new(); - t!(t!(File::open(p.root().join("Cargo.lock"))).read_to_string(&mut lock1)); + let lock1 = p.read_lockfile(); let root = paths::root(); t!(fs::create_dir(&root.join(".cargo"))); - t!(t!(File::create(root.join(".cargo/config"))).write_all( + t!(fs::write( + root.join(".cargo/config"), format!( r#" - [source.my-git-repo] - git = '{}' - replace-with = 'my-awesome-local-registry' - - [source.my-awesome-local-registry] - directory = 'index' - "#, + [source.my-git-repo] + git = '{}' + replace-with = 'my-awesome-local-registry' + + [source.my-awesome-local-registry] + directory = 'index' + "#, git.url() ) - .as_bytes() )); p.cargo("build") @@ -606,8 +607,7 @@ ) .run(); - let mut lock2 = String::new(); - t!(t!(File::open(p.root().join("Cargo.lock"))).read_to_string(&mut lock2)); + let lock2 = p.read_lockfile(); assert_eq!(lock1, lock2, "lock files changed"); } @@ -637,15 +637,16 @@ let root = paths::root(); t!(fs::create_dir(&root.join(".cargo"))); - t!(t!(File::create(root.join(".cargo/config"))).write_all( - br#" - [source.my-git-repo] - git = 'https://example.com/' - replace-with = 'my-awesome-local-registry' - - [source.my-awesome-local-registry] - directory = 'index' - "# + t!(fs::write( + root.join(".cargo/config"), + r#" + [source.my-git-repo] + git = 'https://example.com/' + replace-with = 'my-awesome-local-registry' + + [source.my-awesome-local-registry] + directory = 'index' + "# )); p.cargo("build") @@ -662,11 +663,10 @@ Caused by: the source my-git-repo requires a lock file to be present first before it can be -used against vendored source code - -remove the source replacement configuration, generate a lock file, and then -restore the source replacement configuration to continue the build + used against vendored source code + remove the source replacement configuration, generate a lock file, and then + restore the source replacement configuration to continue the build ", ) .run(); @@ -764,8 +764,8 @@ failed to select a version for the requirement `foo = \"^2\"` candidate versions found which didn't match: 0.0.1 location searched: directory source `[..] (which is replacing registry `[..]`) -required by package `bar v0.1.0` -perhaps a crate was updated and forgotten to be re-vendored? + required by package `bar v0.1.0` + perhaps a crate was updated and forgotten to be re-vendored? 
", ) .with_status(101) diff -Nru cargo-0.44.1/tests/testsuite/doc.rs cargo-0.47.0/tests/testsuite/doc.rs --- cargo-0.44.1/tests/testsuite/doc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/doc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,13 +1,11 @@ //! Tests for the `cargo doc` command. -use std::fs::{self, File}; -use std::io::Read; -use std::str; - use cargo_test_support::paths::CargoPathExt; use cargo_test_support::registry::Package; use cargo_test_support::{basic_lib_manifest, basic_manifest, git, project}; use cargo_test_support::{is_nightly, rustc_host}; +use std::fs; +use std::str; #[cargo_test] fn simple() { @@ -385,14 +383,7 @@ ", ) .run(); - assert!(p.root().join("target/doc").is_dir()); - let doc_file = p.root().join("target/doc/foo/index.html"); - assert!(doc_file.is_file()); - let mut doc_html = String::new(); - File::open(&doc_file) - .unwrap() - .read_to_string(&mut doc_html) - .unwrap(); + let doc_html = p.read_file("target/doc/foo/index.html"); assert!(doc_html.contains("Library")); assert!(!doc_html.contains("Binary")); } @@ -427,14 +418,7 @@ ", ) .run(); - assert!(p.root().join("target/doc").is_dir()); - let doc_file = p.root().join("target/doc/foo/index.html"); - assert!(doc_file.is_file()); - let mut doc_html = String::new(); - File::open(&doc_file) - .unwrap() - .read_to_string(&mut doc_html) - .unwrap(); + let doc_html = p.read_file("target/doc/foo/index.html"); assert!(doc_html.contains("Library")); assert!(!doc_html.contains("Binary")); } @@ -470,14 +454,7 @@ ", ) .run(); - assert!(p.root().join("target/doc").is_dir()); - let doc_file = p.root().join("target/doc/foo/index.html"); - assert!(doc_file.is_file()); - let mut doc_html = String::new(); - File::open(&doc_file) - .unwrap() - .read_to_string(&mut doc_html) - .unwrap(); + let doc_html = p.read_file("target/doc/foo/index.html"); assert!(!doc_html.contains("Library")); assert!(doc_html.contains("Binary")); } @@ -513,14 +490,7 @@ ", ) .run(); - assert!(p.root().join("target/doc").is_dir()); - let doc_file = p.root().join("target/doc/foo/index.html"); - assert!(doc_file.is_file()); - let mut doc_html = String::new(); - File::open(&doc_file) - .unwrap() - .read_to_string(&mut doc_html) - .unwrap(); + let doc_html = p.read_file("target/doc/foo/index.html"); assert!(!doc_html.contains("Library")); assert!(doc_html.contains("Binary")); } @@ -1513,7 +1483,36 @@ #[cargo_test] fn crate_versions() { - // Testing unstable flag + // Testing flag that will reach stable on 1.44 + if !is_nightly() { + return; + } + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "1.2.4" + authors = [] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("-Z crate-versions doc") + .masquerade_as_nightly_cargo() + .run(); + + let output_path = p.root().join("target/doc/foo/index.html"); + let output_documentation = fs::read_to_string(&output_path).unwrap(); + + assert!(output_documentation.contains("Version 1.2.4")); +} + +#[cargo_test] +fn crate_versions_flag_is_overridden() { + // Testing flag that will reach stable on 1.44 if !is_nightly() { return; } @@ -1530,16 +1529,25 @@ .file("src/lib.rs", "") .build(); + let output_documentation = || { + let output_path = p.root().join("target/doc/foo/index.html"); + fs::read_to_string(&output_path).unwrap() + }; + let asserts = |html: String| { + assert!(!html.contains("1.2.4")); + assert!(html.contains("Version 2.0.3")); + }; + p.cargo("-Z crate-versions doc") .masquerade_as_nightly_cargo() + .env("RUSTDOCFLAGS", 
"--crate-version 2.0.3") .run(); + asserts(output_documentation()); - let doc_file = p.root().join("target/doc/foo/index.html"); - let mut doc_html = String::new(); - File::open(&doc_file) - .unwrap() - .read_to_string(&mut doc_html) - .unwrap(); + p.build_dir().rm_rf(); - assert!(doc_html.contains("Version 1.2.4")); + p.cargo("-Z crate-versions rustdoc -- --crate-version 2.0.3") + .masquerade_as_nightly_cargo() + .run(); + asserts(output_documentation()); } diff -Nru cargo-0.44.1/tests/testsuite/features2.rs cargo-0.47.0/tests/testsuite/features2.rs --- cargo-0.44.1/tests/testsuite/features2.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/features2.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,11 @@ //! Tests for the new feature resolver. -use cargo_test_support::project; +use cargo_test_support::cross_compile::{self, alternate}; +use cargo_test_support::paths::CargoPathExt; +use cargo_test_support::publish::validate_crate_contents; use cargo_test_support::registry::{Dependency, Package}; +use cargo_test_support::{basic_manifest, cargo_process, project, rustc_host}; +use std::fs::File; #[cargo_test] fn inactivate_targets() { @@ -183,8 +187,51 @@ } #[cargo_test] -fn decouple_build_deps() { - // Basic test for `build_dep` decouple. +fn itarget_proc_macro() { + // itarget inside a proc-macro while cross-compiling + if cross_compile::disabled() { + return; + } + Package::new("hostdep", "1.0.0").publish(); + Package::new("pm", "1.0.0") + .proc_macro(true) + .target_dep("hostdep", "1.0", &rustc_host()) + .file("src/lib.rs", "extern crate hostdep;") + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + pm = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("check").run(); + p.cargo("check -Zfeatures=itarget") + .masquerade_as_nightly_cargo() + .run(); + p.cargo("check --target").arg(alternate()).run(); + p.cargo("check -Zfeatures=itarget --target") + .arg(alternate()) + .masquerade_as_nightly_cargo() + .run(); + // For good measure, just make sure things don't break. + p.cargo("check -Zfeatures=all --target") + .arg(alternate()) + .masquerade_as_nightly_cargo() + .run(); +} + +#[cargo_test] +fn decouple_host_deps() { + // Basic test for `host_dep` decouple. Package::new("common", "1.0.0") .feature("f1", &[]) .file( @@ -229,14 +276,14 @@ .with_stderr_contains("[..]unresolved import `common::bar`[..]") .run(); - p.cargo("check -Zfeatures=build_dep") + p.cargo("check -Zfeatures=host_dep") .masquerade_as_nightly_cargo() .run(); } #[cargo_test] -fn decouple_build_deps_nested() { - // `build_dep` decouple of transitive dependencies. +fn decouple_host_deps_nested() { + // `host_dep` decouple of transitive dependencies. Package::new("common", "1.0.0") .feature("f1", &[]) .file( @@ -294,7 +341,7 @@ .with_stderr_contains("[..]unresolved import `common::bar`[..]") .run(); - p.cargo("check -Zfeatures=build_dep") + p.cargo("check -Zfeatures=host_dep") .masquerade_as_nightly_cargo() .run(); } @@ -590,7 +637,7 @@ .run(); // Normal only. 
- p.cargo("run -Zfeatures=dev_dep,build_dep") + p.cargo("run -Zfeatures=dev_dep,host_dep") .env("CARGO_FEATURE_EXPECT", "1") .masquerade_as_nightly_cargo() .run(); @@ -604,7 +651,7 @@ .run(); // normal + dev unify - p.cargo("test -Zfeatures=build_dep") + p.cargo("test -Zfeatures=host_dep") .env("CARGO_FEATURE_EXPECT", "3") .masquerade_as_nightly_cargo() .run(); @@ -767,7 +814,7 @@ } #[cargo_test] -fn required_features_build_dep() { +fn required_features_host_dep() { // Check that required-features handles build-dependencies correctly. let p = project() .file( @@ -817,13 +864,13 @@ ) .run(); - p.cargo("run --features bdep/f1 -Zfeatures=build_dep") + p.cargo("run --features bdep/f1 -Zfeatures=host_dep") .masquerade_as_nightly_cargo() .run(); } #[cargo_test] -fn disabled_shared_build_dep() { +fn disabled_shared_host_dep() { // Check for situation where an optional dep of a shared dep is enabled in // a normal dependency, but disabled in an optional one. The unit tree is: // foo @@ -888,8 +935,817 @@ ) .build(); - p.cargo("run -Zfeatures=build_dep -v") + p.cargo("run -Zfeatures=host_dep -v") .masquerade_as_nightly_cargo() .with_stdout("hello from somedep") .run(); } + +#[cargo_test] +fn required_features_inactive_dep() { + // required-features with an inactivated dep. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [target.'cfg(whatever)'.dependencies] + bar = {path="bar"} + + [[bin]] + name = "foo" + required-features = ["feat1"] + + [features] + feat1 = [] + "#, + ) + .file("src/main.rs", "fn main() {}") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("check -Zfeatures=itarget") + .masquerade_as_nightly_cargo() + .with_stderr("[FINISHED] [..]") + .run(); + + p.cargo("check -Zfeatures=itarget --features=feat1") + .masquerade_as_nightly_cargo() + .with_stderr("[CHECKING] foo[..]\n[FINISHED] [..]") + .run(); +} + +#[cargo_test] +fn decouple_proc_macro() { + // proc macro features are not shared + Package::new("common", "1.0.0") + .feature("somefeat", &[]) + .file( + "src/lib.rs", + r#" + pub const fn foo() -> bool { cfg!(feature="somefeat") } + #[cfg(feature="somefeat")] + pub const FEAT_ONLY_CONST: bool = true; + "#, + ) + .publish(); + Package::new("pm", "1.0.0") + .proc_macro(true) + .feature_dep("common", "1.0", &["somefeat"]) + .file( + "src/lib.rs", + r#" + extern crate proc_macro; + extern crate common; + #[proc_macro] + pub fn foo(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + assert!(common::foo()); + "".parse().unwrap() + } + "#, + ) + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "1.0.0" + edition = "2018" + + [dependencies] + pm = "1.0" + common = "1.0" + "#, + ) + .file( + "src/lib.rs", + r#" + //! Test with docs. + //! + //! ```rust + //! pm::foo!{} + //! fn main() { + //! let expected = std::env::var_os("TEST_EXPECTS_ENABLED").is_some(); + //! assert_eq!(expected, common::foo(), "common is wrong"); + //! } + //! ``` + "#, + ) + .file( + "src/main.rs", + r#" + pm::foo!{} + fn main() { + println!("it is {}", common::foo()); + } + "#, + ) + .build(); + + p.cargo("run") + .env("TEST_EXPECTS_ENABLED", "1") + .with_stdout("it is true") + .run(); + + p.cargo("run -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout("it is false") + .run(); + + // Make sure the test is fallible. 
+ p.cargo("test --doc") + .with_status(101) + .with_stdout_contains("[..]common is wrong[..]") + .run(); + + p.cargo("test --doc").env("TEST_EXPECTS_ENABLED", "1").run(); + + p.cargo("test --doc -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .run(); + + p.cargo("doc").run(); + assert!(p + .build_dir() + .join("doc/common/constant.FEAT_ONLY_CONST.html") + .exists()); + // cargo doc should clean in-between runs, but it doesn't, and leaves stale files. + // https://github.com/rust-lang/cargo/issues/6783 (same for removed items) + p.build_dir().join("doc").rm_rf(); + + p.cargo("doc -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .run(); + assert!(!p + .build_dir() + .join("doc/common/constant.FEAT_ONLY_CONST.html") + .exists()); +} + +#[cargo_test] +fn proc_macro_ws() { + // Checks for bug with proc-macro in a workspace with dependency (shouldn't panic). + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["foo", "pm"] + "#, + ) + .file( + "foo/Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [features] + feat1 = [] + "#, + ) + .file("foo/src/lib.rs", "") + .file( + "pm/Cargo.toml", + r#" + [package] + name = "pm" + version = "0.1.0" + + [lib] + proc-macro = true + + [dependencies] + foo = { path = "../foo", features=["feat1"] } + "#, + ) + .file("pm/src/lib.rs", "") + .build(); + + p.cargo("check -p pm -Zfeatures=host_dep -v") + .masquerade_as_nightly_cargo() + .with_stderr_contains("[RUNNING] `rustc --crate-name foo [..]--cfg[..]feat1[..]") + .run(); + // This may be surprising that `foo` doesn't get built separately. It is + // because pm might have other units (binaries, tests, etc.), and so the + // feature resolver must assume that normal deps get unified with it. This + // is related to the bigger issue where the features selected in a + // workspace depend on which packages are selected. + p.cargo("check --workspace -Zfeatures=host_dep -v") + .masquerade_as_nightly_cargo() + .with_stderr( + "\ +[FRESH] foo v0.1.0 [..] +[FRESH] pm v0.1.0 [..] +[FINISHED] dev [..] +", + ) + .run(); + // Selecting just foo will build without unification. + p.cargo("check -p foo -Zfeatures=host_dep -v") + .masquerade_as_nightly_cargo() + // Make sure `foo` is built without feat1 + .with_stderr_line_without(&["[RUNNING] `rustc --crate-name foo"], &["--cfg[..]feat1"]) + .run(); +} + +#[cargo_test] +fn has_dev_dep_for_test() { + // Check for a bug where the decision on whether or not "dev dependencies" + // should be used did not consider `check --profile=test`. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dev-dependencies] + dep = { path = 'dep', features = ['f1'] } + "#, + ) + .file( + "src/lib.rs", + r#" + #[test] + fn t1() { + dep::f(); + } + "#, + ) + .file( + "dep/Cargo.toml", + r#" + [package] + name = "dep" + version = "0.1.0" + + [features] + f1 = [] + "#, + ) + .file( + "dep/src/lib.rs", + r#" + #[cfg(feature = "f1")] + pub fn f() {} + "#, + ) + .build(); + + p.cargo("check -v") + .with_stderr( + "\ +[CHECKING] foo v0.1.0 [..] +[RUNNING] `rustc --crate-name foo [..] +[FINISHED] [..] +", + ) + .run(); + p.cargo("check -v --profile=test -Zfeatures=dev_dep") + .masquerade_as_nightly_cargo() + .with_stderr( + "\ +[CHECKING] dep v0.1.0 [..] +[RUNNING] `rustc --crate-name dep [..] +[CHECKING] foo v0.1.0 [..] +[RUNNING] `rustc --crate-name foo [..] +[FINISHED] [..] +", + ) + .run(); + p.cargo("check -v --profile=test") + .with_stderr( + "\ +[FRESH] dep [..] +[FRESH] foo [..] 
+[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn build_dep_activated() { + // Build dependencies always match the host for [target.*.build-dependencies]. + if cross_compile::disabled() { + return; + } + Package::new("somedep", "1.0.0") + .file("src/lib.rs", "") + .publish(); + Package::new("targetdep", "1.0.0").publish(); + Package::new("hostdep", "1.0.0") + // Check that "for_host" is sticky. + .target_dep("somedep", "1.0", &rustc_host()) + .feature("feat1", &[]) + .file( + "src/lib.rs", + r#" + extern crate somedep; + + #[cfg(not(feature="feat1"))] + compile_error!{"feat1 missing"} + "#, + ) + .publish(); + + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "foo" + version = "0.1.0" + + # This should never be selected. + [target.'{}'.build-dependencies] + targetdep = "1.0" + + [target.'{}'.build-dependencies] + hostdep = {{version="1.0", features=["feat1"]}} + "#, + alternate(), + rustc_host() + ), + ) + .file("src/lib.rs", "") + .file("build.rs", "fn main() {}") + .build(); + + p.cargo("check").run(); + p.cargo("check -Zfeatures=all") + .masquerade_as_nightly_cargo() + .run(); + p.cargo("check --target").arg(alternate()).run(); + p.cargo("check -Zfeatures=all --target") + .arg(alternate()) + .masquerade_as_nightly_cargo() + .run(); +} + +#[cargo_test] +fn resolver_gated() { + // Check that `resolver` field is feature gated. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + resolver = "2" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "\ +error: failed to parse manifest at `[..]/foo/Cargo.toml` + +Caused by: + feature `resolver` is required + + consider adding `cargo-features = [\"resolver\"]` to the manifest +", + ) + .run(); + + // Test with virtual ws. + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a"] + resolver = "2" + "#, + ) + .file("a/Cargo.toml", &basic_manifest("a", "0.1.0")) + .file("a/src/lib.rs", "") + .build(); + + p.cargo("build") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "\ +error: failed to parse manifest at `[..]/foo/Cargo.toml` + +Caused by: + feature `resolver` is required + + consider adding `cargo-features = [\"resolver\"]` to the manifest +", + ) + .run(); +} + +#[cargo_test] +fn resolver_bad_setting() { + // Unknown setting in `resolver` + let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["resolver"] + [package] + name = "foo" + version = "0.1.0" + resolver = "1" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "\ +error: failed to parse manifest at `[..]/foo/Cargo.toml` + +Caused by: + `resolver` setting `1` is not valid, only valid option is \"2\" +", + ) + .run(); +} + +#[cargo_test] +fn resolver_not_both() { + // Can't specify resolver in both workspace and package. 
+ let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["resolver"] + [workspace] + resolver = "2" + [package] + name = "foo" + version = "0.1.0" + resolver = "2" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "\ +error: failed to parse manifest at `[..]/foo/Cargo.toml` + +Caused by: + cannot specify `resolver` field in both `[workspace]` and `[package]` +", + ) + .run(); +} + +#[cargo_test] +fn resolver_ws_member() { + // Can't specify `resolver` in a ws member. + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + cargo-features = ["resolver"] + [package] + name = "a" + version = "0.1.0" + resolver = "2" + "#, + ) + .file("a/src/lib.rs", "") + .build(); + + p.cargo("check") + .masquerade_as_nightly_cargo() + .with_stderr( + "\ +warning: resolver for the non root package will be ignored, specify resolver at the workspace root: +package: [..]/foo/a/Cargo.toml +workspace: [..]/foo/Cargo.toml +[CHECKING] a v0.1.0 [..] +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn resolver_ws_root_and_member() { + // Check when specified in both ws root and member. + let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["resolver"] + [workspace] + members = ["a"] + resolver = "2" + "#, + ) + .file( + "a/Cargo.toml", + r#" + cargo-features = ["resolver"] + [package] + name = "a" + version = "0.1.0" + resolver = "2" + "#, + ) + .file("a/src/lib.rs", "") + .build(); + + // Ignores if they are the same. + p.cargo("check") + .masquerade_as_nightly_cargo() + .with_stderr( + "\ +[CHECKING] a v0.1.0 [..] +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn resolver_enables_new_features() { + // resolver="2" enables all the things. + Package::new("common", "1.0.0") + .feature("normal", &[]) + .feature("build", &[]) + .feature("dev", &[]) + .feature("itarget", &[]) + .file( + "src/lib.rs", + r#" + pub fn feats() -> u32 { + let mut res = 0; + if cfg!(feature="normal") { res |= 1; } + if cfg!(feature="build") { res |= 2; } + if cfg!(feature="dev") { res |= 4; } + if cfg!(feature="itarget") { res |= 8; } + res + } + "#, + ) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["resolver"] + [workspace] + members = ["a", "b"] + resolver = "2" + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + edition = "2018" + + [dependencies] + common = {version = "1.0", features=["normal"]} + + [dev-dependencies] + common = {version = "1.0", features=["dev"]} + + [build-dependencies] + common = {version = "1.0", features=["build"]} + + [target.'cfg(whatever)'.dependencies] + common = {version = "1.0", features=["itarget"]} + "#, + ) + .file( + "a/src/main.rs", + r#" + fn main() { + expect(); + } + + fn expect() { + let expected: u32 = std::env::var("EXPECTED_FEATS").unwrap().parse().unwrap(); + assert_eq!(expected, common::feats()); + } + + #[test] + fn from_test() { + expect(); + } + "#, + ) + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [features] + ping = [] + "#, + ) + .file( + "b/src/main.rs", + r#" + fn main() { + if cfg!(feature="ping") { + println!("pong"); + } + } + "#, + ) + .build(); + + // Only normal. + p.cargo("run --bin a") + .masquerade_as_nightly_cargo() + .env("EXPECTED_FEATS", "1") + .with_stderr( + "\ +[UPDATING] [..] +[DOWNLOADING] crates ... +[DOWNLOADED] common [..] 
+[COMPILING] common v1.0.0 +[COMPILING] a v0.1.0 [..] +[FINISHED] [..] +[RUNNING] `target/debug/a[EXE]` +", + ) + .run(); + + // only normal+dev + p.cargo("test") + .cwd("a") + .masquerade_as_nightly_cargo() + .env("EXPECTED_FEATS", "5") + .run(); + + // -Zpackage-features is enabled. + p.cargo("run -p b --features=ping") + .cwd("a") + .masquerade_as_nightly_cargo() + .with_stdout("pong") + .run(); +} + +#[cargo_test] +fn install_resolve_behavior() { + // install honors the resolver behavior. + Package::new("common", "1.0.0") + .feature("f1", &[]) + .file( + "src/lib.rs", + r#" + #[cfg(feature = "f1")] + compile_error!("f1 should not activate"); + "#, + ) + .publish(); + + Package::new("bar", "1.0.0").dep("common", "1.0").publish(); + + Package::new("foo", "1.0.0") + .file( + "Cargo.toml", + r#" + cargo-features = ["resolver"] + + [package] + name = "foo" + version = "1.0.0" + resolver = "2" + + [target.'cfg(whatever)'.dependencies] + common = {version="1.0", features=["f1"]} + + [dependencies] + bar = "1.0" + + "#, + ) + .file("src/main.rs", "fn main() {}") + .publish(); + + cargo_process("install foo") + .masquerade_as_nightly_cargo() + .run(); +} + +#[cargo_test] +fn package_includes_resolve_behavior() { + // `cargo package` will inherit the correct resolve behavior. + let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["resolver"] + [workspace] + members = ["a"] + resolver = "2" + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + authors = ["Zzz"] + description = "foo" + license = "MIT" + homepage = "https://example.com/" + "#, + ) + .file("a/src/lib.rs", "") + .build(); + + p.cargo("package") + .cwd("a") + .masquerade_as_nightly_cargo() + .run(); + + let rewritten_toml = format!( + r#"{} +cargo-features = ["resolver"] + +[package] +name = "a" +version = "0.1.0" +authors = ["Zzz"] +description = "foo" +homepage = "https://example.com/" +license = "MIT" +resolver = "2" +"#, + cargo::core::package::MANIFEST_PREAMBLE + ); + + let f = File::open(&p.root().join("target/package/a-0.1.0.crate")).unwrap(); + validate_crate_contents( + f, + "a-0.1.0.crate", + &["Cargo.toml", "Cargo.toml.orig", "src/lib.rs"], + &[("Cargo.toml", &rewritten_toml)], + ); +} + +#[cargo_test] +fn tree_all() { + // `cargo tree` with the new feature resolver. + Package::new("log", "0.4.8").feature("serde", &[]).publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [target.'cfg(whatever)'.dependencies] + log = {version="*", features=["serde"]} + "#, + ) + .file("src/lib.rs", "") + .build(); + p.cargo("tree --target=all -Zfeatures=all") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── log v0.4.8 +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/features.rs cargo-0.47.0/tests/testsuite/features.rs --- cargo-0.44.1/tests/testsuite/features.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,11 +1,8 @@ //! Tests for `[features]` table. -use std::fs::File; -use std::io::prelude::*; - use cargo_test_support::paths::CargoPathExt; -use cargo_test_support::registry::Package; -use cargo_test_support::{basic_manifest, project, t}; +use cargo_test_support::registry::{Dependency, Package}; +use cargo_test_support::{basic_manifest, project}; #[cargo_test] fn invalid1() { @@ -101,7 +98,7 @@ Caused by: Feature `bar` depends on `baz` which is not an optional dependency. 
-Consider adding `optional = true` to the dependency + Consider adding `optional = true` to the dependency ", ) .run(); @@ -920,9 +917,7 @@ .build(); p.cargo("fetch").run(); - let loc = p.root().join("Cargo.lock"); - let mut lockfile = String::new(); - t!(t!(File::open(&loc)).read_to_string(&mut lockfile)); + let lockfile = p.read_lockfile(); assert!( lockfile.contains(r#"name = "d1""#), "d1 not found\n{}", @@ -1359,80 +1354,6 @@ } #[cargo_test] -fn combining_features_and_package() { - Package::new("dep", "1.0.0").publish(); - - let p = project() - .file( - "Cargo.toml", - r#" - [project] - name = "foo" - version = "0.0.1" - authors = [] - - [workspace] - members = ["bar"] - - [dependencies] - dep = "1" - "#, - ) - .file("src/lib.rs", "") - .file( - "bar/Cargo.toml", - r#" - [package] - name = "bar" - version = "0.0.1" - authors = [] - [features] - main = [] - "#, - ) - .file( - "bar/src/main.rs", - r#" - #[cfg(feature = "main")] - fn main() {} - "#, - ) - .build(); - - p.cargo("build -Z package-features --workspace --features main") - .masquerade_as_nightly_cargo() - .with_status(101) - .with_stderr_contains("[ERROR] cannot specify features for more than one package") - .run(); - - p.cargo("build -Z package-features --package dep --features main") - .masquerade_as_nightly_cargo() - .with_status(101) - .with_stderr_contains("[ERROR] cannot specify features for packages outside of workspace") - .run(); - p.cargo("build -Z package-features --package dep --all-features") - .masquerade_as_nightly_cargo() - .with_status(101) - .with_stderr_contains("[ERROR] cannot specify features for packages outside of workspace") - .run(); - p.cargo("build -Z package-features --package dep --no-default-features") - .masquerade_as_nightly_cargo() - .with_status(101) - .with_stderr_contains("[ERROR] cannot specify features for packages outside of workspace") - .run(); - - p.cargo("build -Z package-features --workspace --all-features") - .masquerade_as_nightly_cargo() - .run(); - p.cargo("run -Z package-features --package bar --features main") - .masquerade_as_nightly_cargo() - .run(); - p.cargo("build -Z package-features --package dep") - .masquerade_as_nightly_cargo() - .run(); -} - -#[cargo_test] fn namespaced_invalid_feature() { let p = project() .file( @@ -1535,7 +1456,7 @@ Caused by: Feature `bar` includes `crate:baz` which is not an optional dependency. -Consider adding `optional = true` to the dependency + Consider adding `optional = true` to the dependency ", ) .run(); @@ -1598,7 +1519,7 @@ Caused by: Feature `baz` includes the optional dependency of the same name, but this is left implicit in the features included by this feature. -Consider adding `crate:baz` to this feature's requirements. + Consider adding `crate:baz` to this feature's requirements. ", ) .run(); @@ -1634,8 +1555,8 @@ Caused by: Feature `baz` includes the dependency of the same name, but this is left implicit in the features included by this feature. -Additionally, the dependency must be marked as optional to be included in the feature definition. -Consider adding `crate:baz` to this feature's requirements and marking the dependency as `optional = true` + Additionally, the dependency must be marked as optional to be included in the feature definition. + Consider adding `crate:baz` to this feature's requirements and marking the dependency as `optional = true` ", ) .run(); @@ -1671,7 +1592,7 @@ Caused by: Feature `bar` includes `baz` which is not defined as a feature. 
-A non-optional dependency of the same name is defined; consider adding `optional = true` to its definition + A non-optional dependency of the same name is defined; consider adding `optional = true` to its definition ", ).run( ); @@ -2192,3 +2113,80 @@ p.cargo("check --features dep/feat").run(); } + +#[cargo_test] +fn registry_summary_order_doesnt_matter() { + // Checks for an issue where the resolver depended on the order of entries + // in the registry summary. If there was a non-optional dev-dependency + // that appeared before an optional normal dependency, then the resolver + // would not activate the optional dependency with a pkg/featname feature + // syntax. + Package::new("dep", "0.1.0") + .feature("feat1", &[]) + .file( + "src/lib.rs", + r#" + #[cfg(feature="feat1")] + pub fn work() { + println!("it works"); + } + "#, + ) + .publish(); + Package::new("bar", "0.1.0") + .feature("bar_feat", &["dep/feat1"]) + .add_dep(Dependency::new("dep", "0.1.0").dev()) + .add_dep(Dependency::new("dep", "0.1.0").optional(true)) + .file( + "src/lib.rs", + r#" + // This will fail to compile without `dep` optional dep activated. + extern crate dep; + + pub fn doit() { + dep::work(); + } + "#, + ) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [dependencies] + bar = { version="0.1", features = ["bar_feat"] } + "#, + ) + .file( + "src/main.rs", + r#" + fn main() { + bar::doit(); + } + "#, + ) + .build(); + + p.cargo("run") + .with_stderr( + "\ +[UPDATING] [..] +[DOWNLOADING] crates ... +[DOWNLOADED] [..] +[DOWNLOADED] [..] +[COMPILING] dep v0.1.0 +[COMPILING] bar v0.1.0 +[COMPILING] foo v0.1.0 [..] +[FINISHED] [..] +[RUNNING] `target/debug/foo[EXE]` +", + ) + .with_stdout("it works") + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/fix.rs cargo-0.47.0/tests/testsuite/fix.rs --- cargo-0.44.1/tests/testsuite/fix.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/fix.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,11 +1,8 @@ //! Tests for the `cargo fix` command. -use std::fs::File; - use cargo_test_support::git; -use cargo_test_support::{basic_manifest, command_is_available, project}; - -use std::io::Write; +use cargo_test_support::paths; +use cargo_test_support::{basic_manifest, project}; #[cargo_test] fn do_not_fix_broken_builds() { @@ -712,7 +709,7 @@ fn warns_about_dirty_working_directory() { let p = git::new("foo", |p| p.file("src/lib.rs", "pub fn foo() {}")); - File::create(p.root().join("src/lib.rs")).unwrap(); + p.change_file("src/lib.rs", ""); p.cargo("fix") .with_status(101) @@ -736,10 +733,7 @@ fn warns_about_staged_working_directory() { let (p, repo) = git::new_repo("foo", |p| p.file("src/lib.rs", "pub fn foo() {}")); - File::create(&p.root().join("src/lib.rs")) - .unwrap() - .write_all("pub fn bar() {}".to_string().as_bytes()) - .unwrap(); + p.change_file("src/lib.rs", "pub fn bar() {}"); git::add(&repo); p.cargo("fix") @@ -773,7 +767,7 @@ .file(".gitignore", "bar\n") }); - File::create(p.root().join("bar")).unwrap(); + p.change_file("bar", ""); p.cargo("fix").run(); } @@ -1068,11 +1062,8 @@ } #[cargo_test] +#[cfg(unix)] fn does_not_crash_with_rustc_wrapper() { - // We don't have /usr/bin/env on Windows. 
- if cfg!(windows) { - return; - } let p = project() .file( "Cargo.toml", r#" @@ -1091,6 +1082,49 @@ } #[cargo_test] +#[cfg(unix)] +fn does_not_crash_with_rustc_workspace_wrapper() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("fix --allow-no-vcs --verbose -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", "/usr/bin/env") + .masquerade_as_nightly_cargo() + .run(); +} + +#[cargo_test] +fn uses_workspace_wrapper_and_primary_wrapper_override() { + // We don't have /usr/bin/env on Windows. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("fix --allow-no-vcs --verbose -Zunstable-options") + .env("RUSTC_WORKSPACE_WRAPPER", paths::echo_wrapper()) + .masquerade_as_nightly_cargo() + .with_stderr_contains("WRAPPER CALLED: rustc src/lib.rs --crate-name foo [..]") + .run(); +} + +#[cargo_test] fn only_warn_for_relevant_crates() { let p = project() .file( @@ -1252,47 +1286,6 @@ } #[cargo_test] -fn fix_with_clippy() { - if !command_is_available("clippy-driver") { - return; - } - - let p = project() - .file( - "src/lib.rs", - " - pub fn foo() { - let mut v = Vec::<String>::new(); - let _ = v.iter_mut().filter(|&ref a| a.is_empty()); - } - ", - ) - .build(); - - let stderr = "\ -[CHECKING] foo v0.0.1 ([..]) -[FIXING] src/lib.rs (1 fix) -[FINISHED] [..] -"; - - p.cargo("fix -Zunstable-options --clippy --allow-no-vcs") - .masquerade_as_nightly_cargo() - .with_stderr(stderr) - .with_stdout("") - .run(); - - assert_eq!( - p.read_file("src/lib.rs"), - " - pub fn foo() { - let mut v = Vec::<String>::new(); - let _ = v.iter_mut().filter(|a| a.is_empty()); - } - " - ); -} - -#[cargo_test] fn fix_color_message() { // Check that color appears in diagnostics. let p = project() .file( diff -Nru cargo-0.44.1/tests/testsuite/freshness.rs cargo-0.47.0/tests/testsuite/freshness.rs --- cargo-0.44.1/tests/testsuite/freshness.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/freshness.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,18 +1,19 @@ //! Tests for fingerprinting (rebuild detection).
use filetime::FileTime; -use std::fs::{self, File, OpenOptions}; +use std::fs::{self, OpenOptions}; use std::io; use std::io::prelude::*; use std::net::TcpListener; use std::path::{Path, PathBuf}; +use std::process::Stdio; use std::thread; use std::time::SystemTime; +use super::death; use cargo_test_support::paths::{self, CargoPathExt}; use cargo_test_support::registry::Package; -use cargo_test_support::sleep_ms; -use cargo_test_support::{basic_manifest, is_coarse_mtime, project}; +use cargo_test_support::{basic_manifest, is_coarse_mtime, project, rustc_host, sleep_ms}; #[cargo_test] fn modifying_and_moving() { @@ -34,10 +35,7 @@ p.root().move_into_the_past(); p.root().join("target").move_into_the_past(); - File::create(&p.root().join("src/a.rs")) - .unwrap() - .write_all(b"#[allow(unused)]fn main() {}") - .unwrap(); + p.change_file("src/a.rs", "#[allow(unused)]fn main() {}"); p.cargo("build") .with_stderr( "\ @@ -78,16 +76,8 @@ assert!(p.bin("foo").is_file()); let lib = p.root().join("src/lib.rs"); - let bin = p.root().join("src/b.rs"); - - File::create(&lib) - .unwrap() - .write_all(b"invalid rust code") - .unwrap(); - File::create(&bin) - .unwrap() - .write_all(b"#[allow(unused)]fn foo() {}") - .unwrap(); + p.change_file("src/lib.rs", "invalid rust code"); + p.change_file("src/b.rs", "#[allow(unused)]fn foo() {}"); lib.move_into_the_past(); // Make sure the binary is rebuilt, not the lib @@ -501,8 +491,8 @@ /* Targets should be cached from the first build */ let mut e = p.cargo("build"); - // MSVC does not include hash in binary filename, so it gets recompiled. - if cfg!(target_env = "msvc") { + // MSVC/apple does not include hash in binary filename, so it gets recompiled. + if cfg!(any(target_env = "msvc", target_vendor = "apple")) { e.with_stderr("[COMPILING] foo[..]\n[FINISHED] dev[..]"); } else { e.with_stderr("[FINISHED] dev[..]"); @@ -511,7 +501,7 @@ p.rename_run("foo", "off2").with_stdout("feature off").run(); let mut e = p.cargo("build --features foo"); - if cfg!(target_env = "msvc") { + if cfg!(any(target_env = "msvc", target_vendor = "apple")) { e.with_stderr("[COMPILING] foo[..]\n[FINISHED] dev[..]"); } else { e.with_stderr("[FINISHED] dev[..]"); @@ -538,7 +528,7 @@ p.cargo("test").run(); sleep_ms(1000); - File::create(&p.root().join("src/lib.rs")).unwrap(); + p.change_file("src/lib.rs", ""); p.cargo("build -v").run(); p.cargo("test -v") @@ -838,18 +828,16 @@ ) .run(); - File::create(&p.root().join("Cargo.toml")) - .unwrap() - .write_all( - br#" - [package] - name = "foo" - description = "new desc" - version = "0.0.1" - authors = [] - "#, - ) - .unwrap(); + p.change_file( + "Cargo.toml", + r#" + [package] + name = "foo" + description = "new desc" + version = "0.0.1" + authors = [] + "#, + ); p.cargo("run") .with_stdout("new desc") @@ -1427,8 +1415,8 @@ .with_stderr_unordered( "\ [COMPILING] bar [..] -[RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C debuginfo=2 [..] -[RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C debuginfo=2 [..] +[RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C debuginfo=2 [..] +[RUNNING] `rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C debuginfo=2 [..] [COMPILING] somepm [..] [RUNNING] `rustc --crate-name somepm [..] [COMPILING] foo [..] 
@@ -1471,7 +1459,7 @@ sleep_ms(1000); } - File::create(&p.root().join("reg1new/src/lib.rs")).unwrap(); + p.change_file("reg1new/src/lib.rs", ""); if is_coarse_mtime() { sleep_ms(1000); } @@ -2150,3 +2138,444 @@ .with_stderr("[FINISHED] [..]") .run(); } + +#[cargo_test] +fn channel_shares_filenames() { + // Test that different "nightly" releases use the same output filename. + + // Create separate rustc binaries to emulate running different toolchains. + let nightly1 = format!( + "\ +rustc 1.44.0-nightly (38114ff16 2020-03-21) +binary: rustc +commit-hash: 38114ff16e7856f98b2b4be7ab4cd29b38bed59a +commit-date: 2020-03-21 +host: {} +release: 1.44.0-nightly +LLVM version: 9.0 +", + rustc_host() + ); + + let nightly2 = format!( + "\ +rustc 1.44.0-nightly (a5b09d354 2020-03-31) +binary: rustc +commit-hash: a5b09d35473615e7142f5570f5c5fad0caf68bd2 +commit-date: 2020-03-31 +host: {} +release: 1.44.0-nightly +LLVM version: 9.0 +", + rustc_host() + ); + + let beta1 = format!( + "\ +rustc 1.43.0-beta.3 (4c587bbda 2020-03-25) +binary: rustc +commit-hash: 4c587bbda04ab55aaf56feab11dfdfe387a85d7a +commit-date: 2020-03-25 +host: {} +release: 1.43.0-beta.3 +LLVM version: 9.0 +", + rustc_host() + ); + + let beta2 = format!( + "\ +rustc 1.42.0-beta.5 (4e1c5f0e9 2020-02-28) +binary: rustc +commit-hash: 4e1c5f0e9769a588b91c977e3d81e140209ef3a2 +commit-date: 2020-02-28 +host: {} +release: 1.42.0-beta.5 +LLVM version: 9.0 +", + rustc_host() + ); + + let stable1 = format!( + "\ +rustc 1.42.0 (b8cedc004 2020-03-09) +binary: rustc +commit-hash: b8cedc00407a4c56a3bda1ed605c6fc166655447 +commit-date: 2020-03-09 +host: {} +release: 1.42.0 +LLVM version: 9.0 +", + rustc_host() + ); + + let stable2 = format!( + "\ +rustc 1.41.1 (f3e1a954d 2020-02-24) +binary: rustc +commit-hash: f3e1a954d2ead4e2fc197c7da7d71e6c61bad196 +commit-date: 2020-02-24 +host: {} +release: 1.41.1 +LLVM version: 9.0 +", + rustc_host() + ); + + let compiler = project() + .at("compiler") + .file("Cargo.toml", &basic_manifest("compiler", "0.1.0")) + .file( + "src/main.rs", + r#" + fn main() { + if std::env::args_os().any(|a| a == "-vV") { + print!("{}", env!("FUNKY_VERSION_TEST")); + return; + } + let mut cmd = std::process::Command::new("rustc"); + cmd.args(std::env::args_os().skip(1)); + assert!(cmd.status().unwrap().success()); + } + "#, + ) + .build(); + + let makeit = |version, vv| { + // Force a rebuild. + compiler.target_debug_dir().join("deps").rm_rf(); + compiler.cargo("build").env("FUNKY_VERSION_TEST", vv).run(); + fs::rename(compiler.bin("compiler"), compiler.bin(version)).unwrap(); + }; + makeit("nightly1", nightly1); + makeit("nightly2", nightly2); + makeit("beta1", beta1); + makeit("beta2", beta2); + makeit("stable1", stable1); + makeit("stable2", stable2); + + // Run `cargo check` with different rustc versions to observe its behavior. + let p = project().file("src/lib.rs", "").build(); + + // Runs `cargo check` and returns the rmeta filename created. + // Checks that the freshness matches the given value. + let check = |version, fresh| -> String { + let output = p + .cargo("check --message-format=json") + .env("RUSTC", compiler.bin(version)) + .exec_with_output() + .unwrap(); + // Collect the filenames generated. 
+ let mut artifacts: Vec<_> = std::str::from_utf8(&output.stdout) + .unwrap() + .lines() + .filter_map(|line| { + let value: serde_json::Value = serde_json::from_str(line).unwrap(); + if value["reason"].as_str().unwrap() == "compiler-artifact" { + assert_eq!(value["fresh"].as_bool().unwrap(), fresh); + let filenames = value["filenames"].as_array().unwrap(); + assert_eq!(filenames.len(), 1); + Some(filenames[0].to_string()) + } else { + None + } + }) + .collect(); + // Should only generate one rmeta file. + assert_eq!(artifacts.len(), 1); + artifacts.pop().unwrap() + }; + + let nightly1_name = check("nightly1", false); + assert_eq!(check("nightly1", true), nightly1_name); + assert_eq!(check("nightly2", false), nightly1_name); // same as before + assert_eq!(check("nightly2", true), nightly1_name); + // Should rebuild going back to nightly1. + assert_eq!(check("nightly1", false), nightly1_name); + + let beta1_name = check("beta1", false); + assert_ne!(beta1_name, nightly1_name); + assert_eq!(check("beta1", true), beta1_name); + assert_eq!(check("beta2", false), beta1_name); // same as before + assert_eq!(check("beta2", true), beta1_name); + // Should rebuild going back to beta1. + assert_eq!(check("beta1", false), beta1_name); + + let stable1_name = check("stable1", false); + assert_ne!(stable1_name, nightly1_name); + assert_ne!(stable1_name, beta1_name); + let stable2_name = check("stable2", false); + assert_ne!(stable1_name, stable2_name); + // Check everything is fresh. + assert_eq!(check("stable1", true), stable1_name); + assert_eq!(check("stable2", true), stable2_name); + assert_eq!(check("beta1", true), beta1_name); + assert_eq!(check("nightly1", true), nightly1_name); +} + +#[cargo_test] +fn linking_interrupted() { + // Interrupt during the linking phase shouldn't leave test executable as "fresh". + + // This is used to detect when linking starts, then to pause the linker so + // that the test can kill cargo. + let link_listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let link_addr = link_listener.local_addr().unwrap(); + + // This is used to detect when rustc exits. + let rustc_listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let rustc_addr = rustc_listener.local_addr().unwrap(); + + // Create a linker that we can interrupt. + let linker = project() + .at("linker") + .file("Cargo.toml", &basic_manifest("linker", "1.0.0")) + .file( + "src/main.rs", + &r#" + fn main() { + // Figure out the output filename. + let output = match std::env::args().find(|a| a.starts_with("/OUT:")) { + Some(s) => s[5..].to_string(), + None => { + let mut args = std::env::args(); + loop { + if args.next().unwrap() == "-o" { + break; + } + } + args.next().unwrap() + } + }; + std::fs::remove_file(&output).unwrap(); + std::fs::write(&output, "").unwrap(); + // Tell the test that we are ready to be interrupted. + let mut socket = std::net::TcpStream::connect("__ADDR__").unwrap(); + // Wait for the test to kill us. + std::thread::sleep(std::time::Duration::new(60, 0)); + } + "# + .replace("__ADDR__", &link_addr.to_string()), + ) + .build(); + linker.cargo("build").run(); + + // Create a wrapper around rustc that will tell us when rustc is finished. + let rustc = project() + .at("rustc-waiter") + .file("Cargo.toml", &basic_manifest("rustc-waiter", "1.0.0")) + .file( + "src/main.rs", + &r#" + fn main() { + let mut conn = None; + // Check for a normal build (not -vV or --print). + if std::env::args().any(|arg| arg == "t1") { + // Tell the test that rustc has started. 
+ conn = Some(std::net::TcpStream::connect("__ADDR__").unwrap()); + } + let status = std::process::Command::new("rustc") + .args(std::env::args().skip(1)) + .status() + .expect("rustc to run"); + std::process::exit(status.code().unwrap_or(1)); + } + "# + .replace("__ADDR__", &rustc_addr.to_string()), + ) + .build(); + rustc.cargo("build").run(); + + // Build it once so that the fingerprint gets saved to disk. + let p = project() + .file("src/lib.rs", "") + .file("tests/t1.rs", "") + .build(); + p.cargo("test --test t1 --no-run").run(); + + // Make a change, start a build, then interrupt it. + p.change_file("src/lib.rs", "// modified"); + let linker_env = format!( + "CARGO_TARGET_{}_LINKER", + rustc_host().to_uppercase().replace('-', "_") + ); + // NOTE: This assumes that the paths to the linker or rustc are not in the + // fingerprint. But maybe they should be? + let mut cmd = p + .cargo("test --test t1 --no-run") + .env(&linker_env, linker.bin("linker")) + .env("RUSTC", rustc.bin("rustc-waiter")) + .build_command(); + let mut child = cmd + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .env("__CARGO_TEST_SETSID_PLEASE_DONT_USE_ELSEWHERE", "1") + .spawn() + .unwrap(); + // Wait for rustc to start. + let mut rustc_conn = rustc_listener.accept().unwrap().0; + // Wait for linking to start. + drop(link_listener.accept().unwrap()); + + // Interrupt the child. + death::ctrl_c(&mut child); + assert!(!child.wait().unwrap().success()); + // Wait for rustc to exit. If we don't wait, then the command below could + // start while rustc is still being torn down. + let mut buf = [0]; + drop(rustc_conn.read_exact(&mut buf)); + + // Build again, shouldn't be fresh. + p.cargo("test --test t1") + .with_stderr( + "\ +[COMPILING] foo [..] +[FINISHED] [..] +[RUNNING] target/debug/deps/t1[..] +", + ) + .run(); +} + +#[cargo_test] +#[cfg_attr( + not(all(target_arch = "x86_64", target_os = "windows", target_env = "msvc")), + ignore +)] +fn lld_is_fresh() { + // Check for bug when using lld linker that it remains fresh with dylib. 
+ let p = project() + .file( + ".cargo/config", + r#" + [target.x86_64-pc-windows-msvc] + linker = "rust-lld" + rustflags = ["-C", "link-arg=-fuse-ld=lld"] + "#, + ) + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [lib] + crate-type = ["dylib"] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build").run(); + p.cargo("build -v") + .with_stderr("[FRESH] foo [..]\n[FINISHED] [..]") + .run(); +} + +#[cargo_test] +fn env_in_code_causes_rebuild() { + // Only nightly 1.46 has support in dep-info files for this + if !cargo_test_support::is_nightly() { + return; + } + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + "#, + ) + .file( + "src/main.rs", + r#" + fn main() { + println!("{:?}", option_env!("FOO")); + println!("{:?}", option_env!("FOO\nBAR")); + } + "#, + ) + .build(); + + p.cargo("build").env_remove("FOO").run(); + p.cargo("build") + .env_remove("FOO") + .with_stderr("[FINISHED] [..]") + .run(); + p.cargo("build") + .env("FOO", "bar") + .with_stderr("[COMPILING][..]\n[FINISHED][..]") + .run(); + p.cargo("build") + .env("FOO", "bar") + .with_stderr("[FINISHED][..]") + .run(); + p.cargo("build") + .env("FOO", "baz") + .with_stderr("[COMPILING][..]\n[FINISHED][..]") + .run(); + p.cargo("build") + .env("FOO", "baz") + .with_stderr("[FINISHED][..]") + .run(); + p.cargo("build") + .env_remove("FOO") + .with_stderr("[COMPILING][..]\n[FINISHED][..]") + .run(); + p.cargo("build") + .env_remove("FOO") + .with_stderr("[FINISHED][..]") + .run(); + + let interesting = " #!$\nabc\r\\\t\u{8}\r\n"; + p.cargo("build").env("FOO", interesting).run(); + p.cargo("build") + .env("FOO", interesting) + .with_stderr("[FINISHED][..]") + .run(); + + p.cargo("build").env("FOO\nBAR", interesting).run(); + p.cargo("build") + .env("FOO\nBAR", interesting) + .with_stderr("[FINISHED][..]") + .run(); +} + +#[cargo_test] +fn env_build_script_no_rebuild() { + // Only nightly 1.46 has support in dep-info files for this + if !cargo_test_support::is_nightly() { + return; + } + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + "#, + ) + .file( + "build.rs", + r#" + fn main() { + println!("cargo:rustc-env=FOO=bar"); + } + "#, + ) + .file( + "src/main.rs", + r#" + fn main() { + println!("{:?}", env!("FOO")); + } + "#, + ) + .build(); + + p.cargo("build").run(); + p.cargo("build").with_stderr("[FINISHED] [..]").run(); +} diff -Nru cargo-0.44.1/tests/testsuite/generate_lockfile.rs cargo-0.47.0/tests/testsuite/generate_lockfile.rs --- cargo-0.44.1/tests/testsuite/generate_lockfile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/generate_lockfile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,10 +1,8 @@ //! Tests for the `cargo generate-lockfile` command. 
-use std::fs::{self, File}; -use std::io::prelude::*; - use cargo_test_support::registry::Package; use cargo_test_support::{basic_manifest, paths, project, ProjectBuilder}; +use std::fs; #[cargo_test] fn adding_and_removing_packages() { @@ -16,33 +14,27 @@ p.cargo("generate-lockfile").run(); - let toml = p.root().join("Cargo.toml"); let lock1 = p.read_lockfile(); // add a dep - File::create(&toml) - .unwrap() - .write_all( - br#" - [package] - name = "foo" - authors = [] - version = "0.0.1" - - [dependencies.bar] - path = "bar" - "#, - ) - .unwrap(); + p.change_file( + "Cargo.toml", + r#" + [package] + name = "foo" + authors = [] + version = "0.0.1" + + [dependencies.bar] + path = "bar" + "#, + ); p.cargo("generate-lockfile").run(); let lock2 = p.read_lockfile(); assert_ne!(lock1, lock2); // change the dep - File::create(&p.root().join("bar/Cargo.toml")) - .unwrap() - .write_all(basic_manifest("bar", "0.0.2").as_bytes()) - .unwrap(); + p.change_file("bar/Cargo.toml", &basic_manifest("bar", "0.0.2")); p.cargo("generate-lockfile").run(); let lock3 = p.read_lockfile(); assert_ne!(lock1, lock3); @@ -50,17 +42,15 @@ // remove the dep println!("lock4"); - File::create(&toml) - .unwrap() - .write_all( - br#" - [package] - name = "foo" - authors = [] - version = "0.0.1" - "#, - ) - .unwrap(); + p.change_file( + "Cargo.toml", + r#" + [package] + name = "foo" + authors = [] + version = "0.0.1" + "#, + ); p.cargo("generate-lockfile").run(); let lock4 = p.read_lockfile(); assert_eq!(lock1, lock4); @@ -112,13 +102,9 @@ bar = "baz" foo = "bar" "#; - let lockfile = p.root().join("Cargo.lock"); let lock = p.read_lockfile(); let data = lock + metadata; - File::create(&lockfile) - .unwrap() - .write_all(data.as_bytes()) - .unwrap(); + p.change_file("Cargo.lock", &data); // Build and make sure the metadata is still there p.cargo("build").run(); @@ -149,12 +135,7 @@ assert!(lock0.starts_with("# This file is automatically @generated by Cargo.\n# It is not intended for manual editing.\n")); let lock1 = lock0.replace("\n", "\r\n"); - { - File::create(&lockfile) - .unwrap() - .write_all(lock1.as_bytes()) - .unwrap(); - } + p.change_file("Cargo.lock", &lock1); p.cargo("generate-lockfile").run(); diff -Nru cargo-0.44.1/tests/testsuite/git_auth.rs cargo-0.47.0/tests/testsuite/git_auth.rs --- cargo-0.44.1/tests/testsuite/git_auth.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/git_auth.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,17 +3,15 @@ use std::collections::HashSet; use std::io::prelude::*; use std::io::BufReader; -use std::net::TcpListener; +use std::net::{SocketAddr, TcpListener}; use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use std::sync::Arc; -use std::thread; +use std::thread::{self, JoinHandle}; use cargo_test_support::paths; use cargo_test_support::{basic_manifest, project}; -// Tests that HTTP auth is offered from `credential.helper`. -#[cargo_test] -fn http_auth_offered() { +fn setup_failed_auth_test() -> (SocketAddr, JoinHandle<()>, Arc<AtomicUsize>) { let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); @@ -100,7 +98,13 @@ &script.display().to_string().replace("\\", "/"), ) .unwrap(); + (addr, t, connections) +} +// Tests that HTTP auth is offered from `credential.helper`.
+#[cargo_test] +fn http_auth_offered() { + let (addr, t, connections) = setup_failed_auth_test(); let p = project() .file( "Cargo.toml", @@ -146,7 +150,11 @@ Caused by: failed to authenticate when downloading repository -attempted to find username/password via `credential.helper`, but [..] + + * attempted to find username/password via `credential.helper`, but [..] + + if the git CLI succeeds then `net.git-fetch-with-cli` may help here + https://[..] Caused by: ", @@ -265,3 +273,126 @@ .run(); t.join().ok().unwrap(); } + +#[cargo_test] +fn net_err_suggests_fetch_with_cli() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.0" + authors = [] + + [dependencies] + foo = { git = "ssh://needs-proxy.invalid/git" } + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build -v") + .with_status(101) + .with_stderr( + "\ +[UPDATING] git repository `ssh://needs-proxy.invalid/git` +warning: spurious network error[..] +warning: spurious network error[..] +[ERROR] failed to get `foo` as a dependency of package `foo v0.0.0 [..]` + +Caused by: + failed to load source for dependency `foo` + +Caused by: + Unable to update ssh://needs-proxy.invalid/git + +Caused by: + failed to clone into: [..] + +Caused by: + network failure seems to have happened + if a proxy or similar is necessary `net.git-fetch-with-cli` may help here + https://[..] + +Caused by: + failed to resolve address for needs-proxy.invalid[..] +", + ) + .run(); + + p.change_file( + ".cargo/config", + " + [net] + git-fetch-with-cli = true + ", + ); + + p.cargo("build -v") + .with_status(101) + .with_stderr_contains("[..]Unable to update[..]") + .with_stderr_does_not_contain("[..]try enabling `git-fetch-with-cli`[..]") + .run(); +} + +#[cargo_test] +fn instead_of_url_printed() { + let (addr, t, _connections) = setup_failed_auth_test(); + let config = paths::home().join(".gitconfig"); + let mut config = git2::Config::open(&config).unwrap(); + config + .set_str( + &format!("url.http://{}/.insteadOf", addr), + "https://foo.bar/", + ) + .unwrap(); + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies.bar] + git = "https://foo.bar/foo/bar" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build") + .with_status(101) + .with_stderr(&format!( + "\ +[UPDATING] git repository `https://foo.bar/foo/bar` +[ERROR] failed to get `bar` as a dependency of package `foo [..]` + +Caused by: + failed to load source for dependency `bar` + +Caused by: + Unable to update https://foo.bar/foo/bar + +Caused by: + failed to clone into: [..] + +Caused by: + failed to authenticate when downloading repository: http://{addr}/foo/bar + + * attempted to find username/password via `credential.helper`, but maybe the found credentials were incorrect + + if the git CLI succeeds then `net.git-fetch-with-cli` may help here + https://[..] + +Caused by: + [..] +", + addr = addr + )) + .run(); + + t.join().ok().unwrap(); +} diff -Nru cargo-0.44.1/tests/testsuite/git.rs cargo-0.47.0/tests/testsuite/git.rs --- cargo-0.44.1/tests/testsuite/git.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/git.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,7 +1,7 @@ //! Tests for git support. 
use std::env; -use std::fs::{self, File}; +use std::fs; use std::io::prelude::*; use std::net::{TcpListener, TcpStream}; use std::path::Path; @@ -501,10 +501,7 @@ let rev1 = repo.revparse_single("HEAD").unwrap().id(); // Commit the changes and make sure we trigger a recompile - File::create(&bar.root().join("src/lib.rs")) - .unwrap() - .write_all(br#"pub fn bar() -> i32 { 2 }"#) - .unwrap(); + bar.change_file("src/lib.rs", "pub fn bar() -> i32 { 2 }"); git::add(&repo); let rev2 = git::commit(&repo); @@ -623,10 +620,7 @@ p.cargo("build").with_stdout("").run(); // Modify a file manually, shouldn't trigger a recompile - File::create(&git_project.root().join("src/bar.rs")) - .unwrap() - .write_all(br#"pub fn bar() { println!("hello!"); }"#) - .unwrap(); + git_project.change_file("src/bar.rs", r#"pub fn bar() { println!("hello!"); }"#); p.cargo("build").with_stdout("").run(); @@ -764,10 +758,7 @@ .run(); // Modify a file manually, and commit it - File::create(&git_project.root().join("src/bar.rs")) - .unwrap() - .write_all(br#"pub fn bar() { println!("hello!"); }"#) - .unwrap(); + git_project.change_file("src/bar.rs", r#"pub fn bar() { println!("hello!"); }"#); let repo = git2::Repository::open(&git_project.root()).unwrap(); let old_head = repo.head().unwrap().target().unwrap(); git::add(&repo); @@ -785,11 +776,13 @@ .with_status(101) .with_stderr( "\ -[UPDATING] git repository [..] [ERROR] Unable to update [..] Caused by: - revspec '0.1.2' not found; [..] + precise value for git is not a git revision: 0.1.2 + +Caused by: + unable to parse OID - contains invalid characters; class=Invalid (3) ", ) .run(); @@ -878,6 +871,7 @@ .with_stderr( "\ [UPDATING] git repository [..] +[UPDATING] git submodule `file://[..]/dep2` [COMPILING] dep1 [..] [COMPILING] foo [..] [FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n", @@ -941,6 +935,7 @@ let expected = format!( "\ [UPDATING] git repository [..] +[UPDATING] git submodule `file://[..]/dep2` [ERROR] failed to get `dep1` as a dependency of package `foo v0.5.0 [..]` Caused by: @@ -1024,10 +1019,7 @@ ) .run(); - File::create(&git1.root().join("src/lib.rs")) - .unwrap() - .write_all(br#"pub fn foo() {}"#) - .unwrap(); + git1.change_file("src/lib.rs", "pub fn foo() {}"); let repo = git2::Repository::open(&git1.root()).unwrap(); git::add(&repo); let oid = git::commit(&repo); @@ -1084,10 +1076,7 @@ // Update the repo, and simulate someone else updating the lock file and then // us pulling it down. - File::create(&bar.root().join("src/lib.rs")) - .unwrap() - .write_all(br#"pub fn bar() -> i32 { 1 + 0 }"#) - .unwrap(); + bar.change_file("src/lib.rs", "pub fn bar() -> i32 { 1 + 0 }"); let repo = git2::Repository::open(&bar.root()).unwrap(); git::add(&repo); git::commit(&repo); @@ -1096,29 +1085,26 @@ let rev = repo.revparse_single("HEAD").unwrap().id(); - File::create(&foo.root().join("Cargo.lock")) - .unwrap() - .write_all( - format!( - r#" - [[package]] - name = "foo" - version = "0.0.0" - dependencies = [ - 'bar 0.0.0 (git+{url}#{hash})' - ] - - [[package]] - name = "bar" - version = "0.0.0" - source = 'git+{url}#{hash}' - "#, - url = bar.url(), - hash = rev - ) - .as_bytes(), - ) - .unwrap(); + foo.change_file( + "Cargo.lock", + &format!( + r#" + [[package]] + name = "foo" + version = "0.0.0" + dependencies = [ + 'bar 0.0.0 (git+{url}#{hash})' + ] + + [[package]] + name = "bar" + version = "0.0.0" + source = 'git+{url}#{hash}' + "#, + url = bar.url(), + hash = rev + ), + ); // Now build! 
foo.cargo("build") @@ -1182,6 +1168,7 @@ p.cargo("run") .with_stderr( "[UPDATING] git repository `[..]`\n\ + [UPDATING] git submodule `file://[..]/dep2`\n\ [COMPILING] dep1 v0.5.0 ([..])\n\ [COMPILING] foo v0.5.0 ([..])\n\ [FINISHED] dev [unoptimized + debuginfo] target(s) in \ @@ -1191,16 +1178,13 @@ .with_stdout("project2\n") .run(); - File::create(&git_project.root().join(".gitmodules")) - .unwrap() - .write_all( - format!( - "[submodule \"src\"]\n\tpath = src\n\turl={}", - git_project3.url() - ) - .as_bytes(), - ) - .unwrap(); + git_project.change_file( + ".gitmodules", + &format!( + "[submodule \"src\"]\n\tpath = src\n\turl={}", + git_project3.url() + ), + ); // Sync the submodule and reset it to the new remote. sub.sync().unwrap(); @@ -1229,6 +1213,7 @@ .with_stderr("") .with_stderr(&format!( "[UPDATING] git repository `{}`\n\ + [UPDATING] git submodule `file://[..]/dep3`\n\ [UPDATING] dep1 v0.5.0 ([..]) -> #[..]\n\ ", git_project.url() @@ -1357,7 +1342,7 @@ // Modify an ignored file and make sure we don't rebuild println!("second pass"); - File::create(&foo.root().join("src/bar.rs")).unwrap(); + foo.change_file("src/bar.rs", ""); foo.cargo("build").with_stdout("").run(); } @@ -1455,10 +1440,7 @@ .run(); // Make a commit to lock p2 to a different rev - File::create(&bar.root().join("src/lib.rs")) - .unwrap() - .write_all(br#"pub fn bar() -> i32 { 2 }"#) - .unwrap(); + bar.change_file("src/lib.rs", "pub fn bar() -> i32 { 2 }"); let repo = git2::Repository::open(&bar.root()).unwrap(); git::add(&repo); git::commit(&repo); @@ -1562,10 +1544,7 @@ p.process(&p.bin("foo")).with_stdout("0\n").run(); // Touching bar.rs.in should cause the `build` command to run again. - fs::File::create(&p.root().join("bar/src/bar.rs.in")) - .unwrap() - .write_all(b"pub fn gimme() -> i32 { 1 }") - .unwrap(); + p.change_file("bar/src/bar.rs.in", "pub fn gimme() -> i32 { 1 }"); p.cargo("build").run(); @@ -1832,23 +1811,20 @@ // Update the dependency to point to the second repository, but this // shouldn't update the transitive dependency which is the same. 
- File::create(&p.root().join("Cargo.toml")) - .unwrap() - .write_all( - format!( - r#" - [project] - name = "foo" - version = "0.5.0" - authors = [] - [dependencies.dep] - git = '{}' - "#, - dep2.url() - ) - .as_bytes(), - ) - .unwrap(); + p.change_file( + "Cargo.toml", + &format!( + r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] + [dependencies.dep] + git = '{}' + "#, + dep2.url() + ), + ); p.cargo("build") .with_stderr(&format!( @@ -1908,19 +1884,12 @@ let rev1 = repo.revparse_single("HEAD").unwrap().id(); // Just be sure to change a file - File::create(&dep.root().join("src/lib.rs")) - .unwrap() - .write_all(br#"pub fn bar() -> i32 { 2 }"#) - .unwrap(); + dep.change_file("src/lib.rs", "pub fn bar() -> i32 { 2 }"); git::add(&repo); git::commit(&repo); p.cargo("update -p dep").run(); - let mut lockfile = String::new(); - File::open(&p.root().join("Cargo.lock")) - .unwrap() - .read_to_string(&mut lockfile) - .unwrap(); + let lockfile = p.read_lockfile(); assert!( !lockfile.contains(&rev1.to_string()), "{} in {}", @@ -1984,23 +1953,20 @@ ) .run(); - File::create(&p.root().join("b/Cargo.toml")) - .unwrap() - .write_all( - format!( - r#" - [project] - name = "b" - version = "0.5.0" - authors = [] - [dependencies.a] - git = '{}' - "#, - a2.url() - ) - .as_bytes(), - ) - .unwrap(); + p.change_file( + "b/Cargo.toml", + &format!( + r#" + [project] + name = "b" + version = "0.5.0" + authors = [] + [dependencies.a] + git = '{}' + "#, + a2.url() + ), + ); p.cargo("build") .with_stderr( @@ -2217,24 +2183,21 @@ p.cargo("build").run(); - File::create(p.root().join("a/Cargo.toml")) - .unwrap() - .write_all( - format!( - r#" - [package] - name = "a" - version = "0.0.1" - authors = [] - - [dependencies] - git = {{ git = '{}' }} - "#, - git.url() - ) - .as_bytes(), - ) - .unwrap(); + p.change_file( + "a/Cargo.toml", + &format!( + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [dependencies] + git = {{ git = '{}' }} + "#, + git.url() + ), + ); p.cargo("build").run(); } @@ -2580,25 +2543,23 @@ .file("src/main.rs", "fn main() {}") .build(); - File::create(paths::home().join(".gitconfig")) - .unwrap() - .write_all( - format!( - r#" + fs::write( + paths::home().join(".gitconfig"), + format!( + r#" [init] templatedir = {} "#, - git_project2 - .url() - .to_file_path() - .unwrap() - .to_str() - .unwrap() - .replace("\\", "/") - ) - .as_bytes(), - ) - .unwrap(); + git_project2 + .url() + .to_file_path() + .unwrap() + .to_str() + .unwrap() + .replace("\\", "/") + ), + ) + .unwrap(); p.cargo("build").run(); } diff -Nru cargo-0.44.1/tests/testsuite/init.rs cargo-0.47.0/tests/testsuite/init.rs --- cargo-0.44.1/tests/testsuite/init.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/init.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,12 +1,10 @@ //! Tests for the `cargo init` command. 
+use cargo_test_support::{command_is_available, paths, Execs}; use std::env; -use std::fs::{self, File}; -use std::io::prelude::*; +use std::fs; use std::process::Command; -use cargo_test_support::{command_is_available, paths, Execs}; - fn cargo_process(s: &str) -> Execs { let mut execs = cargo_test_support::cargo_process(s); execs.cwd(&paths::root()).env("HOME", &paths::home()); @@ -79,11 +77,7 @@ assert!(paths::root().join("foo/.gitignore").is_file()); let fp = paths::root().join("foo/.gitignore"); - let mut contents = String::new(); - File::open(&fp) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(fp).unwrap(); assert_eq!( contents, "/target\n\ @@ -110,11 +104,7 @@ .run(); let fp = paths::root().join("foo/.gitignore"); - let mut contents = String::new(); - File::open(&fp) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&fp).unwrap(); assert_eq!( contents, "**/some.file\n\n\ @@ -140,16 +130,13 @@ let sourcefile_path = path.join(rellocation); - let content = br#" + let content = r#" fn main() { println!("Hello, world 2!"); } "#; - File::create(&sourcefile_path) - .unwrap() - .write_all(content) - .unwrap(); + fs::write(&sourcefile_path, content).unwrap(); if explicit { cargo_process("init --bin --vcs none") @@ -167,12 +154,8 @@ assert!(!paths::root().join("foo/src/lib.rs").is_file()); // Check that our file is not overwritten - let mut new_content = Vec::new(); - File::open(&sourcefile_path) - .unwrap() - .read_to_end(&mut new_content) - .unwrap(); - assert_eq!(Vec::from(content as &[u8]), new_content); + let new_content = fs::read_to_string(&sourcefile_path).unwrap(); + assert_eq!(content, new_content); } #[cargo_test] @@ -210,22 +193,19 @@ let path = paths::root().join("foo"); fs::create_dir_all(&path.join("src")).unwrap(); - let sourcefile_path1 = path.join("src/lib.rs"); + let path1 = path.join("src/lib.rs"); + fs::write(path1, r#"fn qqq () { println!("Hello, world 2!"); }"#).unwrap(); - File::create(&sourcefile_path1) - .unwrap() - .write_all(br#"fn qqq () { println!("Hello, world 2!"); }"#) - .unwrap(); - - let sourcefile_path2 = path.join("lib.rs"); - - File::create(&sourcefile_path2) - .unwrap() - .write_all(br#" fn qqq () { println!("Hello, world 3!"); }"#) - .unwrap(); + let path2 = path.join("lib.rs"); + fs::write(path2, r#" fn qqq () { println!("Hello, world 3!"); }"#).unwrap(); - cargo_process("init --vcs none").env("USER", "foo").cwd(&path).with_status(101).with_stderr( - "[ERROR] cannot have a package with multiple libraries, found both `src/lib.rs` and `lib.rs`", + cargo_process("init --vcs none") + .env("USER", "foo") + .cwd(&path) + .with_status(101) + .with_stderr( + "[ERROR] cannot have a package with multiple libraries, \ + found both `src/lib.rs` and `lib.rs`", ) .run(); @@ -237,19 +217,11 @@ let path = paths::root().join("foo"); fs::create_dir(&path).unwrap(); - let sourcefile_path1 = path.join("foo.rs"); + let path1 = path.join("foo.rs"); + fs::write(path1, r#"fn main () { println!("Hello, world 2!"); }"#).unwrap(); - File::create(&sourcefile_path1) - .unwrap() - .write_all(br#"fn main () { println!("Hello, world 2!"); }"#) - .unwrap(); - - let sourcefile_path2 = path.join("main.rs"); - - File::create(&sourcefile_path2) - .unwrap() - .write_all(br#"fn main () { println!("Hello, world 3!"); }"#) - .unwrap(); + let path2 = path.join("main.rs"); + fs::write(path2, r#"fn main () { println!("Hello, world 3!"); }"#).unwrap(); cargo_process("init --lib --vcs none") .env("USER", 
"foo") @@ -274,14 +246,8 @@ let sourcefile_path = path.join(rellocation); - let content = br#" - pub fn qqq() {} - "#; - - File::create(&sourcefile_path) - .unwrap() - .write_all(content) - .unwrap(); + let content = "pub fn qqq() {}"; + fs::write(&sourcefile_path, content).unwrap(); cargo_process("init --vcs none") .env("USER", "foo") @@ -292,12 +258,8 @@ assert!(!paths::root().join("foo/src/main.rs").is_file()); // Check that our file is not overwritten - let mut new_content = Vec::new(); - File::open(&sourcefile_path) - .unwrap() - .read_to_end(&mut new_content) - .unwrap(); - assert_eq!(Vec::from(content as &[u8]), new_content); + let new_content = fs::read_to_string(&sourcefile_path).unwrap(); + assert_eq!(content, new_content); } #[cargo_test] @@ -342,9 +304,8 @@ .with_status(101) .with_stderr( "\ -[ERROR] Invalid character `.` in crate name: `foo.bar` -use --name to override crate name -", +[ERROR] invalid character `.` in crate name: `foo.bar`, [..] +use --name to override crate name", ) .run(); @@ -361,7 +322,7 @@ .with_status(101) .with_stderr( "\ -[ERROR] The name `test` cannot be used as a crate name\n\ +[ERROR] the name `test` cannot be used as a crate name, it conflicts [..]\n\ use --name to override crate name ", ) @@ -398,10 +359,7 @@ fn gitignore_appended_not_replaced() { fs::create_dir(&paths::root().join(".git")).unwrap(); - File::create(&paths::root().join(".gitignore")) - .unwrap() - .write_all(b"qqqqqq\n") - .unwrap(); + fs::write(&paths::root().join(".gitignore"), "qqqqqq\n").unwrap(); cargo_process("init --lib").env("USER", "foo").run(); @@ -410,32 +368,21 @@ assert!(paths::root().join(".git").is_dir()); assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); - assert!(contents.contains(r#"qqqqqq"#)); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); + assert!(contents.contains("qqqqqq")); } #[cargo_test] fn gitignore_added_newline_in_existing() { fs::create_dir(&paths::root().join(".git")).unwrap(); - File::create(&paths::root().join(".gitignore")) - .unwrap() - .write_all(b"first") - .unwrap(); + fs::write(&paths::root().join(".gitignore"), "first").unwrap(); cargo_process("init --lib").env("USER", "foo").run(); assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); assert!(contents.starts_with("first\n")); } @@ -447,11 +394,7 @@ assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); assert!(!contents.starts_with('\n')); } @@ -459,20 +402,13 @@ fn mercurial_added_newline_in_existing() { fs::create_dir(&paths::root().join(".hg")).unwrap(); - File::create(&paths::root().join(".hgignore")) - .unwrap() - .write_all(b"first") - .unwrap(); + fs::write(&paths::root().join(".hgignore"), "first").unwrap(); cargo_process("init --lib").env("USER", "foo").run(); assert!(paths::root().join(".hgignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".hgignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = 
fs::read_to_string(&paths::root().join(".hgignore")).unwrap(); assert!(contents.starts_with("first\n")); } @@ -484,11 +420,7 @@ assert!(paths::root().join(".hgignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".hgignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".hgignore")).unwrap(); assert!(!contents.starts_with('\n')); } @@ -559,11 +491,7 @@ assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); assert!(contents.contains(r#"Cargo.lock"#)); } @@ -571,20 +499,13 @@ fn cargo_lock_gitignored_if_lib2() { fs::create_dir(&paths::root().join(".git")).unwrap(); - File::create(&paths::root().join("lib.rs")) - .unwrap() - .write_all(br#""#) - .unwrap(); + fs::write(&paths::root().join("lib.rs"), "").unwrap(); cargo_process("init --vcs git").env("USER", "foo").run(); assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); assert!(contents.contains(r#"Cargo.lock"#)); } @@ -598,11 +519,7 @@ assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); assert!(!contents.contains(r#"Cargo.lock"#)); } @@ -610,20 +527,13 @@ fn cargo_lock_not_gitignored_if_bin2() { fs::create_dir(&paths::root().join(".git")).unwrap(); - File::create(&paths::root().join("main.rs")) - .unwrap() - .write_all(br#""#) - .unwrap(); + fs::write(&paths::root().join("main.rs"), "").unwrap(); cargo_process("init --vcs git").env("USER", "foo").run(); assert!(paths::root().join(".gitignore").is_file()); - let mut contents = String::new(); - File::open(&paths::root().join(".gitignore")) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&paths::root().join(".gitignore")).unwrap(); assert!(!contents.contains(r#"Cargo.lock"#)); } diff -Nru cargo-0.44.1/tests/testsuite/install.rs cargo-0.47.0/tests/testsuite/install.rs --- cargo-0.44.1/tests/testsuite/install.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/install.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,6 +1,6 @@ //! Tests for the `cargo install` command. 
-use std::fs::{self, File, OpenOptions}; +use std::fs::{self, OpenOptions}; use std::io::prelude::*; use cargo_test_support::cross_compile; @@ -9,8 +9,10 @@ assert_has_installed_exe, assert_has_not_installed_exe, cargo_home, }; use cargo_test_support::paths; -use cargo_test_support::registry::Package; -use cargo_test_support::{basic_manifest, cargo_process, project, NO_SUCH_FILE_ERR_MSG}; +use cargo_test_support::registry::{registry_path, registry_url, Package}; +use cargo_test_support::{ + basic_manifest, cargo_process, no_such_file_err_msg, project, symlink_supported, t, +}; fn pkg(name: &str, vers: &str) { Package::new(name, vers) @@ -50,6 +52,35 @@ } #[cargo_test] +fn with_index() { + pkg("foo", "0.0.1"); + + cargo_process("install foo --index") + .arg(registry_url().to_string()) + .with_stderr(&format!( + "\ +[UPDATING] `{reg}` index +[DOWNLOADING] crates ... +[DOWNLOADED] foo v0.0.1 (registry `{reg}`) +[INSTALLING] foo v0.0.1 (registry `{reg}`) +[COMPILING] foo v0.0.1 (registry `{reg}`) +[FINISHED] release [optimized] target(s) in [..] +[INSTALLING] [CWD]/home/.cargo/bin/foo[EXE] +[INSTALLED] package `foo v0.0.1 (registry `{reg}`)` (executable `foo[EXE]`) +[WARNING] be sure to add `[..]` to your PATH to be able to run the installed binaries +", + reg = registry_path().to_str().unwrap() + )) + .run(); + assert_has_installed_exe(cargo_home(), "foo"); + + cargo_process("uninstall foo") + .with_stderr("[REMOVING] [CWD]/home/.cargo/bin/foo[EXE]") + .run(); + assert_has_not_installed_exe(cargo_home(), "foo"); +} + +#[cargo_test] fn multiple_pkgs() { pkg("foo", "0.0.1"); pkg("bar", "0.0.2"); @@ -73,7 +104,7 @@ [FINISHED] release [optimized] target(s) in [..] [INSTALLING] [CWD]/home/.cargo/bin/bar[EXE] [INSTALLED] package `bar v0.0.2` (executable `bar[EXE]`) -[ERROR] could not find `baz` in registry `[..]` +[ERROR] could not find `baz` in registry `[..]` with version `*` [SUMMARY] Successfully installed foo, bar! Failed to install baz (see error(s) above). [WARNING] be sure to add `[..]` to your PATH to be able to run the installed binaries [ERROR] some crates failed to install @@ -145,7 +176,21 @@ .with_stderr( "\ [UPDATING] [..] index -[ERROR] could not find `bar` in registry `[..]` +[ERROR] could not find `bar` in registry `[..]` with version `*` +", + ) + .run(); +} + +#[cargo_test] +fn missing_current_working_directory() { + cargo_process("install .") + .with_status(101) + .with_stderr( + "\ +error: To install the binaries for the package in current working \ +directory use `cargo install --path .`. Use `cargo build` if you \ +want to simply build the package. ", ) .run(); @@ -201,18 +246,16 @@ let t4 = cargo_home(); fs::create_dir(root.join(".cargo")).unwrap(); - File::create(root.join(".cargo/config")) - .unwrap() - .write_all( - format!( - "[install] - root = '{}' - ", - t3.display() - ) - .as_bytes(), - ) - .unwrap(); + fs::write( + root.join(".cargo/config"), + &format!( + "[install] + root = '{}' + ", + t3.display() + ), + ) + .unwrap(); println!("install --root"); @@ -266,6 +309,35 @@ } #[cargo_test] +fn install_target_dir() { + let p = project().file("src/main.rs", "fn main() {}").build(); + + p.cargo("install --target-dir td_test") + .with_stderr( + "\ +[WARNING] Using `cargo install` [..] +[INSTALLING] foo v0.0.1 [..] +[COMPILING] foo v0.0.1 [..] +[FINISHED] release [..] +[INSTALLING] [..]foo[EXE] +[INSTALLED] package `foo v0.0.1 [..]foo[..]` (executable `foo[EXE]`) +[WARNING] be sure to add [..] 
+", + ) + .run(); + + let mut path = p.root(); + path.push("td_test"); + assert!(path.exists()); + + #[cfg(not(windows))] + path.push("release/foo"); + #[cfg(windows)] + path.push("release/foo.exe"); + assert!(path.exists()); +} + +#[cargo_test] fn multiple_crates_error() { let p = git::repo(&paths::root().join("foo")) .file("Cargo.toml", &basic_manifest("foo", "0.1.0")) @@ -831,7 +903,7 @@ Caused by: {err_msg}", - err_msg = NO_SUCH_FILE_ERR_MSG, + err_msg = no_such_file_err_msg(), )) .run(); } @@ -1446,3 +1518,40 @@ .with_stderr_contains(" invalid type: integer `3`[..]") .run(); } + +#[cargo_test] +fn install_git_with_symlink_home() { + // Ensure that `cargo install` with a git repo is OK when CARGO_HOME is a + // symlink, and uses an build script. + if !symlink_supported() { + return; + } + let p = git::new("foo", |p| { + p.file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/main.rs", "fn main() {}") + // This triggers discover_git_and_list_files for detecting changed files. + .file("build.rs", "fn main() {}") + }); + #[cfg(unix)] + use std::os::unix::fs::symlink; + #[cfg(windows)] + use std::os::windows::fs::symlink_dir as symlink; + + let actual = paths::root().join("actual-home"); + t!(std::fs::create_dir(&actual)); + t!(symlink(&actual, paths::home().join(".cargo"))); + cargo_process("install --git") + .arg(p.url().to_string()) + .with_stderr( + "\ +[UPDATING] git repository [..] +[INSTALLING] foo v1.0.0 [..] +[COMPILING] foo v1.0.0 [..] +[FINISHED] [..] +[INSTALLING] [..]home/.cargo/bin/foo[..] +[INSTALLED] package `foo [..] +[WARNING] be sure to add [..] +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/install_upgrade.rs cargo-0.47.0/tests/testsuite/install_upgrade.rs --- cargo-0.44.1/tests/testsuite/install_upgrade.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/install_upgrade.rs 2020-07-17 20:39:39.000000000 +0000 @@ -14,9 +14,9 @@ basic_manifest, cargo_process, cross_compile, execs, git, process, project, Execs, }; -// Helper for publishing a package. -fn pkg(name: &str, vers: &str) { +fn pkg_maybe_yanked(name: &str, vers: &str, yanked: bool) { Package::new(name, vers) + .yanked(yanked) .file( "src/main.rs", r#"fn main() { println!("{}", env!("CARGO_PKG_VERSION")) }"#, @@ -24,6 +24,11 @@ .publish(); } +// Helper for publishing a package. +fn pkg(name: &str, vers: &str) { + pkg_maybe_yanked(name, vers, false) +} + fn v1_path() -> PathBuf { cargo_home().join(".crates.toml") } @@ -225,7 +230,6 @@ cargo_process("install foo --version=1.0") .with_stderr( "\ -[UPDATING] `[..]` index [ERROR] the `--vers` provided, `1.0`, is not a valid semver version: cannot parse '1.0' as a semver if you want to specify semver range, add an explicit qualifier, like ^1.0 @@ -746,3 +750,111 @@ // 0.1.0 should not have any entries. validate_trackers("foo", "0.1.0", &[]); } + +#[cargo_test] +fn already_installed_exact_does_not_update() { + pkg("foo", "1.0.0"); + cargo_process("install foo --version=1.0.0").run(); + cargo_process("install foo --version=1.0.0") + .with_stderr( + "\ +[IGNORED] package `foo v1.0.0` is already installed[..] +[WARNING] be sure to add [..] +", + ) + .run(); + + cargo_process("install foo --version=>=1.0.0") + .with_stderr( + "\ +[UPDATING] `[..]` index +[IGNORED] package `foo v1.0.0` is already installed[..] +[WARNING] be sure to add [..] +", + ) + .run(); + pkg("foo", "1.0.1"); + cargo_process("install foo --version=>=1.0.0") + .with_stderr( + "\ +[UPDATING] `[..]` index +[DOWNLOADING] crates ... 
+[DOWNLOADED] foo v1.0.1 (registry [..]) +[INSTALLING] foo v1.0.1 +[COMPILING] foo v1.0.1 +[FINISHED] release [optimized] target(s) in [..] +[REPLACING] [CWD]/home/.cargo/bin/foo[EXE] +[REPLACED] package `foo v1.0.0` with `foo v1.0.1` (executable `foo[EXE]`) +[WARNING] be sure to add [..] +", + ) + .run(); +} + +#[cargo_test] +fn already_installed_updates_yank_status_on_upgrade() { + pkg("foo", "1.0.0"); + pkg_maybe_yanked("foo", "1.0.1", true); + cargo_process("install foo --version=1.0.0").run(); + + cargo_process("install foo --version=1.0.1") + .with_status(101) + .with_stderr( + "\ +[UPDATING] `[..]` index +[ERROR] could not find `foo` in registry `[..]` with version `=1.0.1` +", + ) + .run(); + + pkg_maybe_yanked("foo", "1.0.1", false); + + pkg("foo", "1.0.1"); + cargo_process("install foo --version=1.0.1") + .with_stderr( + "\ +[UPDATING] `[..]` index +[DOWNLOADING] crates ... +[DOWNLOADED] foo v1.0.1 (registry [..]) +[INSTALLING] foo v1.0.1 +[COMPILING] foo v1.0.1 +[FINISHED] release [optimized] target(s) in [..] +[REPLACING] [CWD]/home/.cargo/bin/foo[EXE] +[REPLACED] package `foo v1.0.0` with `foo v1.0.1` (executable `foo[EXE]`) +[WARNING] be sure to add [..] +", + ) + .run(); +} + +#[cargo_test] +fn partially_already_installed_does_one_update() { + pkg("foo", "1.0.0"); + cargo_process("install foo --version=1.0.0").run(); + pkg("bar", "1.0.0"); + pkg("baz", "1.0.0"); + cargo_process("install foo bar baz --version=1.0.0") + .with_stderr( + "\ +[IGNORED] package `foo v1.0.0` is already installed[..] +[UPDATING] `[..]` index +[DOWNLOADING] crates ... +[DOWNLOADED] bar v1.0.0 (registry [..]) +[INSTALLING] bar v1.0.0 +[COMPILING] bar v1.0.0 +[FINISHED] release [optimized] target(s) in [..] +[INSTALLING] [CWD]/home/.cargo/bin/bar[EXE] +[INSTALLED] package `bar v1.0.0` (executable `bar[EXE]`) +[DOWNLOADING] crates ... +[DOWNLOADED] baz v1.0.0 (registry [..]) +[INSTALLING] baz v1.0.0 +[COMPILING] baz v1.0.0 +[FINISHED] release [optimized] target(s) in [..] +[INSTALLING] [CWD]/home/.cargo/bin/baz[EXE] +[INSTALLED] package `baz v1.0.0` (executable `baz[EXE]`) +[SUMMARY] Successfully installed foo, bar, baz! +[WARNING] be sure to add [..] +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/local_registry.rs cargo-0.47.0/tests/testsuite/local_registry.rs --- cargo-0.44.1/tests/testsuite/local_registry.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/local_registry.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,24 +1,23 @@ //! Tests for local-registry sources. 
-use std::fs::{self, File}; -use std::io::prelude::*; - use cargo_test_support::paths::{self, CargoPathExt}; use cargo_test_support::registry::{registry_path, Package}; use cargo_test_support::{basic_manifest, project, t}; +use std::fs; fn setup() { let root = paths::root(); t!(fs::create_dir(&root.join(".cargo"))); - t!(t!(File::create(root.join(".cargo/config"))).write_all( - br#" - [source.crates-io] - registry = 'https://wut' - replace-with = 'my-awesome-local-registry' - - [source.my-awesome-local-registry] - local-registry = 'registry' - "# + t!(fs::write( + root.join(".cargo/config"), + r#" + [source.crates-io] + registry = 'https://wut' + replace-with = 'my-awesome-local-registry' + + [source.my-awesome-local-registry] + local-registry = 'registry' + "# )); } @@ -441,14 +440,15 @@ fn crates_io_registry_url_is_optional() { let root = paths::root(); t!(fs::create_dir(&root.join(".cargo"))); - t!(t!(File::create(root.join(".cargo/config"))).write_all( - br#" - [source.crates-io] - replace-with = 'my-awesome-local-registry' - - [source.my-awesome-local-registry] - local-registry = 'registry' - "# + t!(fs::write( + root.join(".cargo/config"), + r#" + [source.crates-io] + replace-with = 'my-awesome-local-registry' + + [source.my-awesome-local-registry] + local-registry = 'registry' + "# )); Package::new("bar", "0.0.1") diff -Nru cargo-0.44.1/tests/testsuite/login.rs cargo-0.47.0/tests/testsuite/login.rs --- cargo-0.44.1/tests/testsuite/login.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/login.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,14 +1,13 @@ //! Tests for the `cargo login` command. -use std::fs::{self, File, OpenOptions}; -use std::io::prelude::*; -use std::path::PathBuf; - use cargo::core::Shell; use cargo::util::config::Config; use cargo_test_support::install::cargo_home; use cargo_test_support::registry::{self, registry_url}; use cargo_test_support::{cargo_process, paths, t}; +use std::fs::{self, OpenOptions}; +use std::io::prelude::*; +use std::path::PathBuf; const TOKEN: &str = "test-token"; const TOKEN2: &str = "test-token2"; @@ -26,19 +25,17 @@ fn setup_new_credentials_at(config: PathBuf) { t!(fs::create_dir_all(config.parent().unwrap())); - t!(t!(File::create(&config)) - .write_all(format!(r#"token = "{token}""#, token = ORIGINAL_TOKEN).as_bytes())); + t!(fs::write( + &config, + format!(r#"token = "{token}""#, token = ORIGINAL_TOKEN) + )); } fn check_token(expected_token: &str, registry: Option<&str>) -> bool { let credentials = cargo_home().join("credentials"); assert!(credentials.is_file()); - let mut contents = String::new(); - File::open(&credentials) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&credentials).unwrap(); let toml: toml::Value = contents.parse().unwrap(); let token = match (registry, toml) { diff -Nru cargo-0.44.1/tests/testsuite/lto.rs cargo-0.47.0/tests/testsuite/lto.rs --- cargo-0.44.1/tests/testsuite/lto.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/lto.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,726 @@ +use cargo::core::compiler::Lto; +use cargo_test_support::registry::Package; +use cargo_test_support::{project, Project}; +use std::process::Output; + +#[cargo_test] +fn with_deps() { + if !cargo_test_support::is_nightly() { + return; + } + + Package::new("bar", "0.0.1").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "test" + version = "0.0.0" + + [dependencies] + bar = "*" + + [profile.release] + lto = true + 
"#, + ) + .file("src/main.rs", "extern crate bar; fn main() {}") + .build(); + p.cargo("build -v --release") + .with_stderr_contains("[..]`rustc[..]--crate-name bar[..]-Clinker-plugin-lto[..]`") + .with_stderr_contains("[..]`rustc[..]--crate-name test[..]-C lto[..]`") + .run(); +} + +#[cargo_test] +fn shared_deps() { + if !cargo_test_support::is_nightly() { + return; + } + + Package::new("bar", "0.0.1").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "test" + version = "0.0.0" + + [dependencies] + bar = "*" + + [build-dependencies] + bar = "*" + + [profile.release] + lto = true + "#, + ) + .file("build.rs", "extern crate bar; fn main() {}") + .file("src/main.rs", "extern crate bar; fn main() {}") + .build(); + p.cargo("build -v --release") + .with_stderr_contains("[..]`rustc[..]--crate-name test[..]-C lto[..]`") + .run(); +} + +#[cargo_test] +fn build_dep_not_ltod() { + if !cargo_test_support::is_nightly() { + return; + } + + Package::new("bar", "0.0.1").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "test" + version = "0.0.0" + + [build-dependencies] + bar = "*" + + [profile.release] + lto = true + "#, + ) + .file("build.rs", "extern crate bar; fn main() {}") + .file("src/main.rs", "fn main() {}") + .build(); + p.cargo("build -v --release") + .with_stderr_contains("[..]`rustc[..]--crate-name bar[..]-Cembed-bitcode=no[..]`") + .with_stderr_contains("[..]`rustc[..]--crate-name test[..]-C lto[..]`") + .run(); +} + +#[cargo_test] +fn complicated() { + if !cargo_test_support::is_nightly() { + return; + } + + Package::new("dep-shared", "0.0.1") + .file("src/lib.rs", "pub fn foo() {}") + .publish(); + Package::new("dep-normal2", "0.0.1") + .file("src/lib.rs", "pub fn foo() {}") + .publish(); + Package::new("dep-normal", "0.0.1") + .dep("dep-shared", "*") + .dep("dep-normal2", "*") + .file( + "src/lib.rs", + " + pub fn foo() { + dep_shared::foo(); + dep_normal2::foo(); + } + ", + ) + .publish(); + Package::new("dep-build2", "0.0.1") + .file("src/lib.rs", "pub fn foo() {}") + .publish(); + Package::new("dep-build", "0.0.1") + .dep("dep-shared", "*") + .dep("dep-build2", "*") + .file( + "src/lib.rs", + " + pub fn foo() { + dep_shared::foo(); + dep_build2::foo(); + } + ", + ) + .publish(); + Package::new("dep-proc-macro2", "0.0.1") + .file("src/lib.rs", "pub fn foo() {}") + .publish(); + Package::new("dep-proc-macro", "0.0.1") + .proc_macro(true) + .dep("dep-shared", "*") + .dep("dep-proc-macro2", "*") + .file( + "src/lib.rs", + " + extern crate proc_macro; + use proc_macro::TokenStream; + + #[proc_macro_attribute] + pub fn foo(_: TokenStream, a: TokenStream) -> TokenStream { + dep_shared::foo(); + dep_proc_macro2::foo(); + a + } + ", + ) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "test" + version = "0.0.0" + + [lib] + crate-type = ['cdylib', 'staticlib'] + + [dependencies] + dep-normal = "*" + dep-proc-macro = "*" + + [build-dependencies] + dep-build = "*" + + [profile.release] + lto = true + "#, + ) + .file("build.rs", "fn main() { dep_build::foo() }") + .file( + "src/main.rs", + "#[dep_proc_macro::foo] fn main() { dep_normal::foo() }", + ) + .file( + "src/lib.rs", + "#[dep_proc_macro::foo] pub fn foo() { dep_normal::foo() }", + ) + .build(); + p.cargo("build -v --release") + // normal deps and their transitive dependencies do not need object + // code, so they should have linker-plugin-lto specified + .with_stderr_contains("[..]`rustc[..]--crate-name dep_normal2 
[..]-Clinker-plugin-lto[..]`") + .with_stderr_contains("[..]`rustc[..]--crate-name dep_normal [..]-Clinker-plugin-lto[..]`") + // build dependencies and their transitive deps don't need any bitcode, + // so embedding should be turned off + .with_stderr_contains("[..]`rustc[..]--crate-name dep_build2 [..]-Cembed-bitcode=no[..]`") + .with_stderr_contains("[..]`rustc[..]--crate-name dep_build [..]-Cembed-bitcode=no[..]`") + .with_stderr_contains( + "[..]`rustc[..]--crate-name build_script_build [..]-Cembed-bitcode=no[..]`", + ) + // proc macro deps are the same as build deps here + .with_stderr_contains( + "[..]`rustc[..]--crate-name dep_proc_macro2 [..]-Cembed-bitcode=no[..]`", + ) + .with_stderr_contains( + "[..]`rustc[..]--crate-name dep_proc_macro [..]-Cembed-bitcode=no[..]`", + ) + .with_stderr_contains("[..]`rustc[..]--crate-name test [..]--crate-type bin[..]-C lto[..]`") + .with_stderr_contains( + "[..]`rustc[..]--crate-name test [..]--crate-type cdylib[..]-C lto[..]`", + ) + .with_stderr_contains("[..]`rustc[..]--crate-name dep_shared [..]`") + .with_stderr_does_not_contain("[..]--crate-name dep_shared[..]-C lto[..]") + .with_stderr_does_not_contain("[..]--crate-name dep_shared[..]-Clinker-plugin-lto[..]") + .with_stderr_does_not_contain("[..]--crate-name dep_shared[..]-Cembed-bitcode[..]") + .run(); +} + +#[cargo_test] +fn off_in_manifest_works() { + if !cargo_test_support::is_nightly() { + return; + } + + Package::new("bar", "0.0.1") + .file("src/lib.rs", "pub fn foo() {}") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "test" + version = "0.0.0" + + [dependencies] + bar = "*" + + [profile.release] + lto = "off" + "#, + ) + .file("src/lib.rs", "pub fn foo() {}") + .file( + "src/main.rs", + "fn main() { + test::foo(); + bar::foo(); + }", + ) + .build(); + p.cargo("build -v --release") + .with_stderr( + "\ +[UPDATING] [..] +[DOWNLOADING] [..] +[DOWNLOADED] [..] +[COMPILING] bar v0.0.1 +[RUNNING] `rustc --crate-name bar [..]--crate-type lib [..]-Cembed-bitcode=no[..] +[COMPILING] test [..] +[RUNNING] `rustc --crate-name test [..]--crate-type lib [..]-Cembed-bitcode=no[..] +[RUNNING] `rustc --crate-name test src/main.rs [..]--crate-type bin [..]-C lto=off[..] +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn between_builds() { + if !cargo_test_support::is_nightly() { + return; + } + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "test" + version = "0.0.0" + + [profile.release] + lto = true + "#, + ) + .file("src/lib.rs", "pub fn foo() {}") + .file("src/main.rs", "fn main() { test::foo() }") + .build(); + p.cargo("build -v --release --lib") + .with_stderr( + "\ +[COMPILING] test [..] +[RUNNING] `rustc [..]--crate-type lib[..]-Clinker-plugin-lto[..] +[FINISHED] [..] +", + ) + .run(); + p.cargo("build -v --release") + .with_stderr_contains( + "\ +[COMPILING] test [..] +[RUNNING] `rustc [..]--crate-type bin[..]-C lto[..] +[FINISHED] [..] 
+", + ) + .run(); +} + +#[cargo_test] +fn test_all() { + if !cargo_test_support::is_nightly() { + return; + } + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.0" + + [profile.release] + lto = true + "#, + ) + .file("src/main.rs", "fn main() {}") + .file("tests/a.rs", "") + .file("tests/b.rs", "") + .build(); + p.cargo("test --release -v") + .with_stderr_contains("[RUNNING] `rustc[..]--crate-name foo[..]-C lto[..]") + .run(); +} + +#[cargo_test] +fn test_all_and_bench() { + if !cargo_test_support::is_nightly() { + return; + } + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.0" + + [profile.release] + lto = true + [profile.bench] + lto = true + "#, + ) + .file("src/main.rs", "fn main() {}") + .file("tests/a.rs", "") + .file("tests/b.rs", "") + .build(); + p.cargo("test --release -v") + .with_stderr_contains("[RUNNING] `rustc[..]--crate-name a[..]-C lto[..]") + .with_stderr_contains("[RUNNING] `rustc[..]--crate-name b[..]-C lto[..]") + .with_stderr_contains("[RUNNING] `rustc[..]--crate-name foo[..]-C lto[..]") + .run(); +} + +fn project_with_dep(crate_types: &str) -> Project { + Package::new("registry", "0.0.1") + .file("src/lib.rs", r#"pub fn foo() { println!("registry"); }"#) + .publish(); + Package::new("registry-shared", "0.0.1") + .file("src/lib.rs", r#"pub fn foo() { println!("shared"); }"#) + .publish(); + + project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.0" + + [workspace] + + [dependencies] + bar = { path = 'bar' } + registry-shared = "*" + + [profile.release] + lto = true + "#, + ) + .file( + "src/main.rs", + " + fn main() { + bar::foo(); + registry_shared::foo(); + } + ", + ) + .file( + "bar/Cargo.toml", + &format!( + r#" + [package] + name = "bar" + version = "0.0.0" + + [dependencies] + registry = "*" + registry-shared = "*" + + [lib] + crate-type = [{}] + "#, + crate_types + ), + ) + .file( + "bar/src/lib.rs", + r#" + pub fn foo() { + println!("bar"); + registry::foo(); + registry_shared::foo(); + } + "#, + ) + .file("tests/a.rs", "") + .file("bar/tests/b.rs", "") + .build() +} + +fn verify_lto(output: &Output, krate: &str, krate_info: &str, expected_lto: Lto) { + let stderr = std::str::from_utf8(&output.stderr).unwrap(); + let mut matches = stderr.lines().filter(|line| { + line.contains("Running") + && line.contains(&format!("--crate-name {} ", krate)) + && line.contains(krate_info) + }); + let line = matches.next().unwrap_or_else(|| { + panic!( + "expected to find crate `{}` info: `{}`, not found in output:\n{}", + krate, krate_info, stderr + ); + }); + if let Some(line2) = matches.next() { + panic!( + "found multiple lines matching crate `{}` info: `{}`:\nline1:{}\nline2:{}\noutput:\n{}", + krate, krate_info, line, line2, stderr + ); + } + let actual_lto = if let Some(index) = line.find("-C lto=") { + let s = &line[index..]; + let end = s.find(' ').unwrap(); + let mode = &line[index..index + end]; + if mode == "off" { + Lto::Off + } else { + Lto::Run(Some(mode.into())) + } + } else if line.contains("-C lto") { + Lto::Run(None) + } else if line.contains("-Clinker-plugin-lto") { + Lto::OnlyBitcode + } else if line.contains("-Cembed-bitcode=no") { + Lto::OnlyObject + } else { + Lto::ObjectAndBitcode + }; + assert_eq!( + actual_lto, expected_lto, + "did not find expected LTO in line: {}", + line + ); +} + +#[cargo_test] +fn cdylib_and_rlib() { + if !cargo_test_support::is_nightly() { + return; + } + let p = project_with_dep("'cdylib', 'rlib'"); + 
let output = p.cargo("build --release -v").exec_with_output().unwrap(); + verify_lto( + &output, + "registry", + "--crate-type lib", + Lto::ObjectAndBitcode, + ); + verify_lto( + &output, + "registry_shared", + "--crate-type lib", + Lto::ObjectAndBitcode, + ); + verify_lto( + &output, + "bar", + "--crate-type cdylib --crate-type rlib", + Lto::ObjectAndBitcode, + ); + verify_lto(&output, "foo", "--crate-type bin", Lto::Run(None)); + p.cargo("test --release -v") + .with_stderr_unordered( + "\ +[FRESH] registry v0.0.1 +[FRESH] registry-shared v0.0.1 +[FRESH] bar v0.0.0 [..] +[COMPILING] foo [..] +[RUNNING] `rustc --crate-name foo [..]-Cembed-bitcode=no --test[..] +[RUNNING] `rustc --crate-name a [..]-Cembed-bitcode=no --test[..] +[FINISHED] [..] +[RUNNING] [..] +[RUNNING] [..] +", + ) + .run(); + p.cargo("build --release -v --manifest-path bar/Cargo.toml") + .with_stderr_unordered( + "\ +[FRESH] registry-shared v0.0.1 +[FRESH] registry v0.0.1 +[FRESH] bar v0.0.0 [..] +[FINISHED] [..] +", + ) + .run(); + p.cargo("test --release -v --manifest-path bar/Cargo.toml") + .with_stderr_unordered( + "\ +[FRESH] registry v0.0.1 +[FRESH] registry-shared v0.0.1 +[COMPILING] bar [..] +[RUNNING] `rustc --crate-name bar [..]-Cembed-bitcode=no --test[..] +[RUNNING] `rustc --crate-name b [..]-Cembed-bitcode=no --test[..] +[FINISHED] [..] +[RUNNING] [..] +[RUNNING] [..] +[DOCTEST] bar +[RUNNING] `rustdoc --crate-type cdylib --crate-type rlib --test [..] +", + ) + .run(); +} + +#[cargo_test] +fn dylib() { + if !cargo_test_support::is_nightly() { + return; + } + let p = project_with_dep("'dylib'"); + let output = p.cargo("build --release -v").exec_with_output().unwrap(); + verify_lto(&output, "registry", "--crate-type lib", Lto::OnlyObject); + verify_lto( + &output, + "registry_shared", + "--crate-type lib", + Lto::ObjectAndBitcode, + ); + verify_lto(&output, "bar", "--crate-type dylib", Lto::OnlyObject); + verify_lto(&output, "foo", "--crate-type bin", Lto::Run(None)); + p.cargo("test --release -v") + .with_stderr_unordered( + "\ +[FRESH] registry v0.0.1 +[FRESH] registry-shared v0.0.1 +[FRESH] bar v0.0.0 [..] +[COMPILING] foo [..] +[RUNNING] `rustc --crate-name foo [..]-Cembed-bitcode=no --test[..] +[RUNNING] `rustc --crate-name a [..]-Cembed-bitcode=no --test[..] +[FINISHED] [..] +[RUNNING] [..] +[RUNNING] [..] +", + ) + .run(); + p.cargo("build --release -v --manifest-path bar/Cargo.toml") + .with_stderr_unordered( + "\ +[COMPILING] registry-shared v0.0.1 +[FRESH] registry v0.0.1 +[RUNNING] `rustc --crate-name registry_shared [..]-Cembed-bitcode=no[..] +[COMPILING] bar [..] +[RUNNING] `rustc --crate-name bar [..]--crate-type dylib [..]-Cembed-bitcode=no[..] +[FINISHED] [..] +", + ) + .run(); + p.cargo("test --release -v --manifest-path bar/Cargo.toml") + .with_stderr_unordered( + "\ +[FRESH] registry-shared v0.0.1 +[FRESH] registry v0.0.1 +[COMPILING] bar [..] +[RUNNING] `rustc --crate-name bar [..]-Cembed-bitcode=no --test[..] +[RUNNING] `rustc --crate-name b [..]-Cembed-bitcode=no --test[..] +[FINISHED] [..] +[RUNNING] [..] +[RUNNING] [..] 
+", + ) + .run(); +} + +#[cargo_test] +fn test_profile() { + if !cargo_test_support::is_nightly() { + return; + } + Package::new("bar", "0.0.1") + .file("src/lib.rs", "pub fn foo() -> i32 { 123 } ") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [profile.test] + lto = 'thin' + + [dependencies] + bar = "*" + "#, + ) + .file( + "src/lib.rs", + r#" + #[test] + fn t1() { + assert_eq!(123, bar::foo()); + } + "#, + ) + .build(); + + p.cargo("test -v") + .with_stderr("\ +[UPDATING] [..] +[DOWNLOADING] [..] +[DOWNLOADED] [..] +[COMPILING] bar v0.0.1 +[RUNNING] `rustc --crate-name bar [..]crate-type lib[..] +[COMPILING] foo [..] +[RUNNING] `rustc --crate-name foo [..]--crate-type lib --emit=dep-info,metadata,link -Cembed-bitcode=no[..] +[RUNNING] `rustc --crate-name foo [..]--emit=dep-info,link -C lto=thin [..]--test[..] +[FINISHED] [..] +[RUNNING] [..] +[DOCTEST] foo +[RUNNING] `rustdoc [..] +") + .run(); +} + +#[cargo_test] +fn dev_profile() { + if !cargo_test_support::is_nightly() { + return; + } + // Mixing dev=LTO with test=not-LTO + Package::new("bar", "0.0.1") + .file("src/lib.rs", "pub fn foo() -> i32 { 123 } ") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [profile.dev] + lto = 'thin' + + [dependencies] + bar = "*" + "#, + ) + .file( + "src/lib.rs", + r#" + #[test] + fn t1() { + assert_eq!(123, bar::foo()); + } + "#, + ) + .build(); + + p.cargo("test -v") + .with_stderr("\ +[UPDATING] [..] +[DOWNLOADING] [..] +[DOWNLOADED] [..] +[COMPILING] bar v0.0.1 +[RUNNING] `rustc --crate-name bar [..]crate-type lib[..] +[COMPILING] foo [..] +[RUNNING] `rustc --crate-name foo [..]--crate-type lib --emit=dep-info,metadata,link -Clinker-plugin-lto [..] +[RUNNING] `rustc --crate-name foo [..]--emit=dep-info,link -Cembed-bitcode=no [..]--test[..] +[FINISHED] [..] +[RUNNING] [..] +[DOCTEST] foo +[RUNNING] `rustdoc [..] 
+") + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/main.rs cargo-0.47.0/tests/testsuite/main.rs --- cargo-0.44.1/tests/testsuite/main.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/main.rs 2020-07-17 20:39:39.000000000 +0000 @@ -3,7 +3,7 @@ #![allow(clippy::blacklisted_name)] #![allow(clippy::explicit_iter_loop)] #![allow(clippy::redundant_closure)] -#![allow(clippy::block_in_if_condition_stmt)] // clippy doesn't agree with rustfmt 😂 +#![allow(clippy::blocks_in_if_conditions)] // clippy doesn't agree with rustfmt 😂 #![allow(clippy::inefficient_to_string)] // this causes suggestions that result in `(*s).to_string()` #![warn(clippy::needless_borrow)] #![warn(clippy::redundant_clone)] @@ -24,10 +24,10 @@ mod cargo_alias_config; mod cargo_command; mod cargo_features; +mod cargo_targets; mod cfg; mod check; mod clean; -mod clippy; mod collisions; mod concurrent; mod config; @@ -61,17 +61,20 @@ mod locate_project; mod lockfile_compat; mod login; +mod lto; mod member_errors; mod message_format; mod metabuild; mod metadata; mod minimal_versions; +mod multitarget; mod net_config; mod new; mod offline; mod out_dir; mod owner; mod package; +mod package_features; mod patch; mod path; mod paths; @@ -95,6 +98,7 @@ mod rustc; mod rustc_info_cache; mod rustdoc; +mod rustdoc_extern_html; mod rustdocflags; mod rustflags; mod search; @@ -103,6 +107,9 @@ mod test; mod timings; mod tool_paths; +mod tree; +mod tree_graph_features; +mod unit_graph; mod update; mod vendor; mod verify_project; diff -Nru cargo-0.44.1/tests/testsuite/message_format.rs cargo-0.47.0/tests/testsuite/message_format.rs --- cargo-0.44.1/tests/testsuite/message_format.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/message_format.rs 2020-07-17 20:39:39.000000000 +0000 @@ -64,7 +64,10 @@ p.cargo("build --message-format json-render-diagnostics") .with_status(101) - .with_stdout("{\"reason\":\"compiler-artifact\",[..]") + .with_stdout( + "{\"reason\":\"compiler-artifact\",[..]\n\ + {\"reason\":\"build-finished\",\"success\":false}", + ) .with_stderr_contains( "\ [COMPILING] bar [..] 
diff -Nru cargo-0.44.1/tests/testsuite/metabuild.rs cargo-0.47.0/tests/testsuite/metabuild.rs --- cargo-0.44.1/tests/testsuite/metabuild.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/metabuild.rs 2020-07-17 20:39:39.000000000 +0000 @@ -32,7 +32,7 @@ Caused by: feature `metabuild` is required -consider adding `cargo-features = [\"metabuild\"]` to the manifest + consider adding `cargo-features = [\"metabuild\"]` to the manifest ", ) .run(); @@ -461,7 +461,7 @@ "package_version": "0.5.0", "target_kind": ["lib"], "compile_mode": "build", - "kind": "Host", + "kind": null, "deps": [], "outputs": [ "[..]/target/debug/deps/libmb-[..].rlib", @@ -478,7 +478,7 @@ "package_version": "0.0.1", "target_kind": ["lib"], "compile_mode": "build", - "kind": "Host", + "kind": null, "deps": [], "outputs": [ "[..]/target/debug/deps/libmb_other-[..].rlib", @@ -495,9 +495,9 @@ "package_version": "0.0.1", "target_kind": ["custom-build"], "compile_mode": "build", - "kind": "Host", + "kind": null, "deps": [0, 1], - "outputs": ["[..]/target/debug/build/foo-[..]/metabuild_foo-[..][EXE]"], + "outputs": "{...}", "links": "{...}", "program": "rustc", "args": "{...}", @@ -509,7 +509,7 @@ "package_version": "0.0.1", "target_kind": ["custom-build"], "compile_mode": "run-custom-build", - "kind": "Host", + "kind": null, "deps": [2], "outputs": [], "links": {}, @@ -523,7 +523,7 @@ "package_version": "0.0.1", "target_kind": ["lib"], "compile_mode": "build", - "kind": "Host", + "kind": null, "deps": [3], "outputs": [ "[..]/foo/target/debug/deps/libfoo-[..].rlib", @@ -686,9 +686,7 @@ { "executable": null, "features": [], - "filenames": [ - "[..]/foo/target/debug/build/foo-[..]/metabuild-foo[EXE]" - ], + "filenames": "{...}", "fresh": false, "package_id": "foo [..]", "profile": "{...}", diff -Nru cargo-0.44.1/tests/testsuite/metadata.rs cargo-0.47.0/tests/testsuite/metadata.rs --- cargo-0.44.1/tests/testsuite/metadata.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/metadata.rs 2020-07-17 20:39:39.000000000 +0000 @@ -67,7 +67,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -159,7 +160,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -248,7 +250,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -537,7 +540,8 @@ "workspace_members": [ "foo 0.5.0 (path+file:[..]foo)" ], - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -622,7 +626,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -708,7 +713,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -722,6 +728,14 @@ r#" [workspace] members = ["bar", "baz"] + + [workspace.metadata] + tool1 = "hello" + tool2 = [1, 2, 3] + + [workspace.metadata.foo] + bar = 3 + "#, ) .file("bar/Cargo.toml", &basic_lib_manifest("bar")) @@ -822,7 +836,14 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": { + "tool1": "hello", + "tool2": [1, 2, 3], + "foo": { + "bar": 3 + } + } }"#, ) .run(); 
@@ -920,7 +941,8 @@ "resolve": null, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -979,7 +1001,8 @@ "resolve": null, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#; #[cargo_test] @@ -1115,6 +1138,7 @@ baz = "quux" "#, ) + .file("README.md", "") .file("src/lib.rs", "") .build(); @@ -1163,7 +1187,8 @@ "resolve": null, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -1186,6 +1211,7 @@ publish = ["my-registry"] "#, ) + .file("README.md", "") .file("src/lib.rs", "") .build(); @@ -1230,7 +1256,8 @@ "resolve": null, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -1315,7 +1342,8 @@ "workspace_members": [ "bar 0.5.0 (path+file:[..])" ], - "workspace_root": "[..]" + "workspace_root": "[..]", + "metadata": null } "#, ) @@ -1396,7 +1424,8 @@ "workspace_members": [ "foo 0.1.0 (path+file:[..])" ], - "workspace_root": "[..]" + "workspace_root": "[..]", + "metadata": null } "#, ) @@ -1493,7 +1522,8 @@ "workspace_members": [ "foo 0.1.0 (path+file:[..])" ], - "workspace_root": "[..]" + "workspace_root": "[..]", + "metadata": null } "#, ) @@ -1710,7 +1740,8 @@ "workspace_members": [ "foo 0.0.1[..]" ], - "workspace_root": "[..]" + "workspace_root": "[..]", + "metadata": null }"#, ) .run(); @@ -1801,7 +1832,8 @@ "workspace_members": [ "foo 0.5.0 [..]" ], - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null } "#, ) @@ -1896,7 +1928,8 @@ }, "target_directory": "[..]/foo/target", "version": 1, - "workspace_root": "[..]foo" + "workspace_root": "[..]foo", + "metadata": null } "#, ) @@ -2280,7 +2313,8 @@ }, "target_directory": "[..]/foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null } "# .replace("$ALT_TRIPLE", alt_target) @@ -2354,7 +2388,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]foo" + "workspace_root": "[..]foo", + "metadata": null } "# .replace("$ALT_TRIPLE", alt_target) @@ -2425,7 +2460,8 @@ }, "target_directory": "[..]foo/target", "version": 1, - "workspace_root": "[..]foo" + "workspace_root": "[..]foo", + "metadata": null } "# .replace("$HOST_TRIPLE", &rustc_host()) @@ -2515,7 +2551,8 @@ }, "target_directory": "[..]/foo/target", "version": 1, - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null } "# .replace("$HOST_TRIPLE", &rustc_host()) @@ -2565,6 +2602,7 @@ "target_directory": "{...}", "version": 1, "workspace_root": "{...}", + "metadata": null, "resolve": { "nodes": [ { @@ -2679,6 +2717,7 @@ "target_directory": "[..]/foo/target", "version": 1, "workspace_root": "[..]/foo", + "metadata": null, "resolve": { "nodes": [ { diff -Nru cargo-0.44.1/tests/testsuite/multitarget.rs cargo-0.47.0/tests/testsuite/multitarget.rs --- cargo-0.44.1/tests/testsuite/multitarget.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/multitarget.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,144 @@ +//! 
Tests for multiple `--target` flags to subcommands + +use cargo_test_support::{basic_manifest, cross_compile, project, rustc_host}; + +#[cargo_test] +fn double_target_rejected() { + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("build --target a --target b") + .with_stderr("error: specifying multiple `--target` flags requires `-Zmultitarget`") + .with_status(101) + .run(); +} + +#[cargo_test] +fn simple_build() { + if cross_compile::disabled() { + return; + } + let t1 = cross_compile::alternate(); + let t2 = rustc_host(); + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("build -Z multitarget") + .arg("--target") + .arg(&t1) + .arg("--target") + .arg(&t2) + .masquerade_as_nightly_cargo() + .run(); + + assert!(p.target_bin(t1, "foo").is_file()); + assert!(p.target_bin(&t2, "foo").is_file()); +} + +#[cargo_test] +fn simple_test() { + if !cross_compile::can_run_on_host() { + return; + } + let t1 = cross_compile::alternate(); + let t2 = rustc_host(); + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/lib.rs", "fn main() {}") + .build(); + + p.cargo("test -Z multitarget") + .arg("--target") + .arg(&t1) + .arg("--target") + .arg(&t2) + .masquerade_as_nightly_cargo() + .with_stderr_contains(&format!("[RUNNING] [..]{}[..]", t1)) + .with_stderr_contains(&format!("[RUNNING] [..]{}[..]", t2)) + .run(); +} + +#[cargo_test] +fn simple_run() { + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("run -Z multitarget --target a --target b") + .with_stderr("error: only one `--target` argument is supported") + .with_status(101) + .masquerade_as_nightly_cargo() + .run(); +} + +#[cargo_test] +fn simple_doc() { + if cross_compile::disabled() { + return; + } + let t1 = cross_compile::alternate(); + let t2 = rustc_host(); + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/lib.rs", "//! empty lib") + .build(); + + p.cargo("doc -Z multitarget") + .arg("--target") + .arg(&t1) + .arg("--target") + .arg(&t2) + .masquerade_as_nightly_cargo() + .run(); + + assert!(p.build_dir().join(&t1).join("doc/foo/index.html").is_file()); + assert!(p.build_dir().join(&t2).join("doc/foo/index.html").is_file()); +} + +#[cargo_test] +fn simple_check() { + if cross_compile::disabled() { + return; + } + let t1 = cross_compile::alternate(); + let t2 = rustc_host(); + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("check -Z multitarget") + .arg("--target") + .arg(&t1) + .arg("--target") + .arg(&t2) + .masquerade_as_nightly_cargo() + .run(); +} + +#[cargo_test] +fn same_value_twice() { + if cross_compile::disabled() { + return; + } + let t = rustc_host(); + let p = project() + .file("Cargo.toml", &basic_manifest("foo", "1.0.0")) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("build -Z multitarget") + .arg("--target") + .arg(&t) + .arg("--target") + .arg(&t) + .masquerade_as_nightly_cargo() + .run(); + + assert!(p.target_bin(&t, "foo").is_file()); +} diff -Nru cargo-0.44.1/tests/testsuite/new.rs cargo-0.47.0/tests/testsuite/new.rs --- cargo-0.44.1/tests/testsuite/new.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/new.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,11 +1,9 @@ //! 
Tests for the `cargo new` command. -use std::env; -use std::fs::{self, File}; -use std::io::prelude::*; - use cargo_test_support::paths; use cargo_test_support::{cargo_process, git_process}; +use std::env; +use std::fs::{self, File}; fn create_empty_gitconfig() { // This helps on Windows where libgit2 is very aggressive in attempting to @@ -27,11 +25,7 @@ assert!(!paths::root().join("foo/.gitignore").is_file()); let lib = paths::root().join("foo/src/lib.rs"); - let mut contents = String::new(); - File::open(&lib) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&lib).unwrap(); assert_eq!( contents, r#"#[cfg(test)] @@ -86,11 +80,7 @@ assert!(paths::root().join("foo/.gitignore").is_file()); let fp = paths::root().join("foo/.gitignore"); - let mut contents = String::new(); - File::open(&fp) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&fp).unwrap(); assert_eq!(contents, "/target\nCargo.lock\n",); cargo_process("build").cwd(&paths::root().join("foo")).run(); @@ -126,11 +116,7 @@ fn invalid_characters() { cargo_process("new foo.rs") .with_status(101) - .with_stderr( - "\ -[ERROR] Invalid character `.` in crate name: `foo.rs` -use --name to override crate name", - ) + .with_stderr("[ERROR] invalid character `.` in crate name: `foo.rs`, [..]") .run(); } @@ -138,10 +124,7 @@ fn reserved_name() { cargo_process("new test") .with_status(101) - .with_stderr( - "[ERROR] The name `test` cannot be used as a crate name\n\ - use --name to override crate name", - ) + .with_stderr("[ERROR] the name `test` cannot be used as a crate name, it conflicts [..]") .run(); } @@ -150,8 +133,18 @@ cargo_process("new --bin incremental") .with_status(101) .with_stderr( - "[ERROR] The name `incremental` cannot be used as a crate name\n\ - use --name to override crate name", + "[ERROR] the name `incremental` cannot be used as a crate name, it conflicts [..]", + ) + .run(); + + cargo_process("new --lib incremental") + .env("USER", "foo") + .with_stderr( + "\ +[WARNING] the name `incremental` will not support binary executables with that name, \ +it conflicts with cargo's build directory names +[CREATED] library `incremental` package +", ) .run(); } @@ -160,9 +153,20 @@ fn keyword_name() { cargo_process("new pub") .with_status(101) + .with_stderr("[ERROR] the name `pub` cannot be used as a crate name, it is a Rust keyword") + .run(); +} + +#[cargo_test] +fn std_name() { + cargo_process("new core") + .env("USER", "foo") .with_stderr( - "[ERROR] The name `pub` cannot be used as a crate name\n\ - use --name to override crate name", + "\ +[WARNING] the name `core` is part of Rust's standard library +It is recommended to use a different name to avoid problems. 
+[CREATED] binary (application) `core` package +", ) .run(); } @@ -173,11 +177,7 @@ cargo_process("new foo").env("USER", "foo").run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo"]"#)); } @@ -187,11 +187,7 @@ cargo_process("new foo").env("USER", "foo \"bar\"").run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo \"bar\""]"#)); } @@ -204,11 +200,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo"]"#)); } @@ -221,11 +213,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo"]"#)); } @@ -239,11 +227,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["bar "]"#)); } @@ -256,11 +240,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["bar "]"#)); } @@ -273,11 +253,7 @@ cargo_process("new foo").env("USER", "foo").run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["bar "]"#)); } @@ -295,11 +271,7 @@ cargo_process("init").env("USER", "foo").run(); let toml = paths::root().join("Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["bar "]"#)); } @@ -311,11 +283,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo "]"#), contents); } @@ -329,11 +297,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["foo "]"#)); } @@ -345,26 +309,21 @@ .unwrap(); let root = paths::root(); fs::create_dir(&root.join(".cargo")).unwrap(); - File::create(&root.join(".cargo/config")) - .unwrap() - .write_all( - br#" - [cargo-new] - name = "new-foo" - email = "new-bar" - vcs = "none" - "#, - ) - .unwrap(); + fs::write( + 
&root.join(".cargo/config"), + r#" + [cargo-new] + name = "new-foo" + email = "new-bar" + vcs = "none" + "#, + ) + .unwrap(); cargo_process("new foo").env("USER", "foo").run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["new-foo "]"#)); assert!(!root.join("foo/.gitignore").exists()); } @@ -378,11 +337,7 @@ .run(); let toml = paths::root().join("foo/Cargo.toml"); - let mut contents = String::new(); - File::open(&toml) - .unwrap() - .read_to_string(&mut contents) - .unwrap(); + let contents = fs::read_to_string(&toml).unwrap(); assert!(contents.contains(r#"authors = ["bar "]"#)); } @@ -390,17 +345,16 @@ fn git_prefers_command_line() { let root = paths::root(); fs::create_dir(&root.join(".cargo")).unwrap(); - File::create(&root.join(".cargo/config")) - .unwrap() - .write_all( - br#" - [cargo-new] - vcs = "none" - name = "foo" - email = "bar" - "#, - ) - .unwrap(); + fs::write( + &root.join(".cargo/config"), + r#" + [cargo-new] + vcs = "none" + name = "foo" + email = "bar" + "#, + ) + .unwrap(); cargo_process("new foo --vcs git").env("USER", "foo").run(); assert!(paths::root().join("foo/.gitignore").exists()); @@ -483,7 +437,10 @@ fn explicit_invalid_name_not_suggested() { cargo_process("new --name 10-invalid a") .with_status(101) - .with_stderr("[ERROR] Package names starting with a digit cannot be used as a crate name") + .with_stderr( + "[ERROR] the name `10-invalid` cannot be used as a crate name, \ + the name cannot start with a digit", + ) .run(); } @@ -558,3 +515,61 @@ let after = fs::read_to_string(paths::root().join("foo/Cargo.lock")).unwrap(); assert_eq!(before, after); } + +#[cargo_test] +fn restricted_windows_name() { + if cfg!(windows) { + cargo_process("new nul") + .env("USER", "foo") + .with_status(101) + .with_stderr("[ERROR] cannot use name `nul`, it is a reserved Windows filename") + .run(); + } else { + cargo_process("new nul") + .env("USER", "foo") + .with_stderr( + "\ +[WARNING] the name `nul` is a reserved Windows filename +This package will not work on Windows platforms. +[CREATED] binary (application) `nul` package +", + ) + .run(); + } +} + +#[cargo_test] +fn non_ascii_name() { + cargo_process("new Привет") + .env("USER", "foo") + .with_stderr( + "\ +[WARNING] the name `Привет` contains non-ASCII characters +Support for non-ASCII crate names is experimental and only valid on the nightly toolchain. +[CREATED] binary (application) `Привет` package +", + ) + .run(); +} + +#[cargo_test] +fn non_ascii_name_invalid() { + // These are alphanumeric characters, but not Unicode XID. 
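The expected error strings in these `cargo new` tests spell out the naming rule: the first character must be a Unicode XID start character or `_`, and the remaining characters must be XID continue characters or `-`. A rough standalone sketch of that rule, assuming the `unicode-xid` crate; a hypothetical helper, not Cargo's implementation:

    // Hypothetical helper, not Cargo's code: the crate-name rule described by
    // the expected errors in these tests. Assumes the `unicode-xid` crate (0.2).
    use unicode_xid::UnicodeXID;

    fn check_crate_name(name: &str) -> Result<(), String> {
        let mut chars = name.chars();
        if let Some(first) = chars.next() {
            // First character: a Unicode XID start character or `_`.
            // Digits fail this check, matching "cannot start with a digit".
            if !(first.is_xid_start() || first == '_') {
                return Err(format!("invalid character `{}` in crate name: `{}`", first, name));
            }
        }
        // Remaining characters: XID continue characters (letters, digits, `_`) or `-`.
        for ch in chars {
            if !(ch.is_xid_continue() || ch == '-') {
                return Err(format!("invalid character `{}` in crate name: `{}`", ch, name));
            }
        }
        Ok(())
    }

    fn main() {
        assert!(check_crate_name("foo").is_ok());
        assert!(check_crate_name("Привет").is_ok()); // non-ASCII letters are XID characters
        assert!(check_crate_name("a¼").is_err());
        assert!(check_crate_name("ⒶⒷⒸ").is_err());
        println!("ok");
    }
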
+ cargo_process("new ⒶⒷⒸ") + .env("USER", "foo") + .with_status(101) + .with_stderr( + "[ERROR] invalid character `Ⓐ` in crate name: `ⒶⒷⒸ`, \ + the first character must be a Unicode XID start character (most letters or `_`)", + ) + .run(); + + cargo_process("new a¼") + .env("USER", "foo") + .with_status(101) + .with_stderr( + "[ERROR] invalid character `¼` in crate name: `a¼`, \ + characters must be Unicode XID characters (numbers, `-`, `_`, or most letters)", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/offline.rs cargo-0.47.0/tests/testsuite/offline.rs --- cargo-0.44.1/tests/testsuite/offline.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/offline.rs 2020-07-17 20:39:39.000000000 +0000 @@ -322,7 +322,7 @@ .with_status(101) .with_stderr( "\ -[ERROR] failed to download `baz v1.0.0` +[ERROR] failed to download `bar v0.1.0` Caused by: can't make HTTP request in the offline mode @@ -526,8 +526,8 @@ .with_status(101) .with_stderr("\ [ERROR] failed to select a version for the requirement `dep = \"^2.0\"` - candidate versions found which didn't match: 1.0.0 - location searched: `[..]` index (which is replacing registry `https://github.com/rust-lang/crates.io-index`) +candidate versions found which didn't match: 1.0.0 +location searched: `[..]` index (which is replacing registry `https://github.com/rust-lang/crates.io-index`) required by package `foo v0.1.0 ([..]/foo)` perhaps a crate was updated and forgotten to be re-vendored? As a reminder, you're using offline mode (--offline) which can sometimes cause \ diff -Nru cargo-0.44.1/tests/testsuite/out_dir.rs cargo-0.47.0/tests/testsuite/out_dir.rs --- cargo-0.44.1/tests/testsuite/out_dir.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/out_dir.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,11 +1,10 @@ //! Tests for --out-dir flag. 
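The hunks below give `check_dir_contents` a fourth expected-file list so the Windows GNU toolchain is checked separately from MSVC. For orientation, the platform dispatch can be sketched as a standalone function; the file names are copied from the cdylib expectations further down, and the function name is made up for illustration:

    // Illustrative only: which files the tests expect a cdylib crate named `foo`
    // to produce on each platform (names copied from the expectations below).
    fn expected_cdylib_files() -> &'static [&'static str] {
        if cfg!(target_os = "windows") {
            if cfg!(target_env = "msvc") {
                &["foo.dll", "foo.dll.exp", "foo.dll.lib", "foo.pdb"]
            } else {
                // The GNU (MinGW) toolchain emits a `.dll.a` import library instead.
                &["foo.dll", "libfoo.dll.a"]
            }
        } else if cfg!(target_os = "macos") {
            &["libfoo.dylib", "libfoo.dylib.dSYM"]
        } else {
            &["libfoo.so"]
        }
    }

    fn main() {
        println!("expected cdylib outputs: {:?}", expected_cdylib_files());
    }
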
-use std::env; -use std::fs::{self, File}; -use std::path::Path; - use cargo_test_support::sleep_ms; use cargo_test_support::{basic_manifest, project}; +use std::env; +use std::fs; +use std::path::Path; #[cargo_test] fn binary_with_debug() { @@ -21,6 +20,7 @@ &["foo"], &["foo", "foo.dSYM"], &["foo.exe", "foo.pdb"], + &["foo.exe"], ); } @@ -56,6 +56,7 @@ &["libfoo.a"], &["libfoo.a"], &["foo.lib"], + &["libfoo.a"], ); } @@ -89,8 +90,9 @@ check_dir_contents( &p.root().join("out"), &["libfoo.so"], - &["libfoo.dylib"], - &["foo.dll", "foo.dll.lib"], + &["libfoo.dylib", "libfoo.dylib.dSYM"], + &["foo.dll", "foo.dll.exp", "foo.dll.lib", "foo.pdb"], + &["foo.dll", "libfoo.dll.a"], ); } @@ -125,6 +127,7 @@ &["libfoo.rlib"], &["libfoo.rlib"], &["libfoo.rlib"], + &["libfoo.rlib"], ); } @@ -168,6 +171,7 @@ &["foo"], &["foo", "foo.dSYM"], &["foo.exe", "foo.pdb"], + &["foo.exe"], ); } @@ -175,8 +179,8 @@ fn out_dir_is_a_file() { let p = project() .file("src/main.rs", r#"fn main() { println!("Hello, World!") }"#) + .file("out", "") .build(); - File::create(p.root().join("out")).unwrap(); p.cargo("build -Z unstable-options --out-dir out") .masquerade_as_nightly_cargo() @@ -243,6 +247,7 @@ &["a", "b"], &["a", "a.dSYM", "b", "b.dSYM"], &["a.exe", "a.pdb", "b.exe", "b.pdb"], + &["a.exe", "b.exe"], ); } @@ -267,6 +272,7 @@ &["foo"], &["foo", "foo.dSYM"], &["foo.exe", "foo.pdb"], + &["foo.exe"], ); } @@ -274,10 +280,15 @@ out_dir: &Path, expected_linux: &[&str], expected_mac: &[&str], - expected_win: &[&str], + expected_win_msvc: &[&str], + expected_win_gnu: &[&str], ) { let expected = if cfg!(target_os = "windows") { - expected_win + if cfg!(target_env = "msvc") { + expected_win_msvc + } else { + expected_win_gnu + } } else if cfg!(target_os = "macos") { expected_mac } else { diff -Nru cargo-0.44.1/tests/testsuite/owner.rs cargo-0.47.0/tests/testsuite/owner.rs --- cargo-0.44.1/tests/testsuite/owner.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/owner.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,7 +4,7 @@ use cargo_test_support::paths::CargoPathExt; use cargo_test_support::project; -use cargo_test_support::registry::{self, api_path, registry_url}; +use cargo_test_support::registry::{self, api_path}; fn setup(name: &str, content: Option<&str>) { let dir = api_path().join(format!("api/v1/crates/{}", name)); @@ -23,6 +23,10 @@ "id": 70, "login": "github:rust-lang:core", "name": "Core" + }, + { + "id": 123, + "login": "octocat" } ] }"#; @@ -43,8 +47,13 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("owner -l --index") - .arg(registry_url().to_string()) + p.cargo("owner -l --token sekrit") + .with_stdout( + "\ +github:rust-lang:core (Core) +octocat +", + ) .run(); } @@ -68,8 +77,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("owner -a username --index") - .arg(registry_url().to_string()) + p.cargo("owner -a username --token sekrit") .with_status(101) .with_stderr( " Updating `[..]` index @@ -98,8 +106,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("owner -r username --index") - .arg(registry_url().to_string()) + p.cargo("owner -r username --token sekrit") .with_status(101) .with_stderr( " Updating `[..]` index diff -Nru cargo-0.44.1/tests/testsuite/package_features.rs cargo-0.47.0/tests/testsuite/package_features.rs --- cargo-0.44.1/tests/testsuite/package_features.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/package_features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,472 @@ +//! 
Tests for -Zpackage-features + +use cargo_test_support::registry::Package; +use cargo_test_support::{basic_manifest, project}; + +#[cargo_test] +fn virtual_no_default_features() { + // --no-default-features in root of virtual workspace. + Package::new("dep1", "1.0.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [dependencies] + dep1 = {version = "1.0", optional = true} + + [features] + default = ["dep1"] + "#, + ) + .file("a/src/lib.rs", "") + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [features] + default = ["f1"] + f1 = [] + "#, + ) + .file( + "b/src/lib.rs", + r#" + #[cfg(feature = "f1")] + compile_error!{"expected f1 off"} + "#, + ) + .build(); + + p.cargo("check --no-default-features") + .with_status(101) + .with_stderr( + "\ +[ERROR] --no-default-features is not allowed in the root of a virtual workspace +[NOTE] while this was previously accepted, it didn't actually do anything +", + ) + .run(); + + p.cargo("check --no-default-features -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_stderr_unordered( + "\ +[UPDATING] [..] +[CHECKING] a v0.1.0 [..] +[CHECKING] b v0.1.0 [..] +[FINISHED] [..] +", + ) + .run(); + + p.cargo("check --features foo -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr("[ERROR] none of the selected packages contains these features: foo") + .run(); + + p.cargo("check --features a/dep1,b/f1,b/f2,f2 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr("[ERROR] none of the selected packages contains these features: b/f2, f2") + .run(); +} + +#[cargo_test] +fn virtual_features() { + // --features in root of virtual workspace. + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [features] + f1 = [] + "#, + ) + .file( + "a/src/lib.rs", + r#" + #[cfg(not(feature = "f1"))] + compile_error!{"f1 is missing"} + "#, + ) + .file("b/Cargo.toml", &basic_manifest("b", "0.1.0")) + .file("b/src/lib.rs", "") + .build(); + + p.cargo("check --features f1") + .with_status(101) + .with_stderr( + "\ +[ERROR] --features is not allowed in the root of a virtual workspace +[NOTE] while this was previously accepted, it didn't actually do anything +", + ) + .run(); + + p.cargo("check --features f1 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_stderr_unordered( + "\ +[CHECKING] a [..] +[CHECKING] b [..] +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn virtual_with_specific() { + // -p flags with --features in root of virtual. 
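The two tests above show that `--features` and `--no-default-features` in the root of a virtual workspace are rejected unless `-Zpackage-features` is enabled on nightly. Outside the test harness, the equivalent invocation might look like the following sketch, which assumes a rustup-managed nightly toolchain and uses `f1` purely as an example feature name:

    // Rough illustration of the end-user invocation; assumes rustup's `cargo`
    // shim so that `+nightly` selects a nightly toolchain.
    use std::process::Command;

    fn main() {
        let status = Command::new("cargo")
            .args(&["+nightly", "check", "-Zpackage-features", "--features", "f1"])
            .status()
            .expect("failed to spawn cargo");
        println!("cargo exited with: {}", status);
    }
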
+ let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [features] + f1 = [] + f2 = [] + "#, + ) + .file( + "a/src/lib.rs", + r#" + #[cfg(not_feature = "f1")] + compile_error!{"f1 is missing"} + #[cfg(not_feature = "f2")] + compile_error!{"f2 is missing"} + "#, + ) + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [features] + f2 = [] + f3 = [] + "#, + ) + .file( + "b/src/lib.rs", + r#" + #[cfg(not_feature = "f2")] + compile_error!{"f2 is missing"} + #[cfg(not_feature = "f3")] + compile_error!{"f3 is missing"} + "#, + ) + .build(); + + p.cargo("check -p a -p b --features f1,f2,f3") + .with_status(101) + .with_stderr( + "\ +[ERROR] --features is not allowed in the root of a virtual workspace +[NOTE] while this was previously accepted, it didn't actually do anything +", + ) + .run(); + + p.cargo("check -p a -p b --features f1,f2,f3 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_stderr_unordered( + "\ +[CHECKING] a [..] +[CHECKING] b [..] +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn other_member_from_current() { + // -p for another member while in the current directory. + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["bar"] + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = { path="bar", features=["f3"] } + + [features] + f1 = ["bar/f4"] + "#, + ) + .file("src/lib.rs", "") + .file( + "bar/Cargo.toml", + r#" + [package] + name = "bar" + version = "0.1.0" + + [features] + f1 = [] + f2 = [] + f3 = [] + f4 = [] + "#, + ) + .file("bar/src/lib.rs", "") + .file( + "bar/src/main.rs", + r#" + fn main() { + if cfg!(feature = "f1") { + print!("f1"); + } + if cfg!(feature = "f2") { + print!("f2"); + } + if cfg!(feature = "f3") { + print!("f3"); + } + if cfg!(feature = "f4") { + print!("f4"); + } + println!(); + } + "#, + ) + .build(); + + p.cargo("run -p bar --features f1") + .with_stdout("f3f4") + .run(); + + p.cargo("run -p bar --features f1 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_stdout("f1") + .run(); + + p.cargo("run -p bar --features f1,f2") + .with_status(101) + .with_stderr("[ERROR] Package `foo[..]` does not have these features: `f2`") + .run(); + + p.cargo("run -p bar --features f1,f2 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_stdout("f1f2") + .run(); + + p.cargo("run -p bar --features bar/f1") + .with_stdout("f1f3") + .run(); + + p.cargo("run -p bar --features bar/f1 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_stdout("f1") + .run(); +} + +#[cargo_test] +fn virtual_member_slash() { + // member slash feature syntax + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [dependencies] + b = {path="../b", optional=true} + + [features] + default = ["f1"] + f1 = [] + f2 = [] + "#, + ) + .file( + "a/src/lib.rs", + r#" + #[cfg(feature = "f1")] + compile_error!{"f1 is set"} + + #[cfg(feature = "f2")] + compile_error!{"f2 is set"} + + #[cfg(feature = "b")] + compile_error!{"b is set"} + "#, + ) + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [features] + bfeat = [] + "#, + ) + .file( + "b/src/lib.rs", + r#" + #[cfg(feature = "bfeat")] + compile_error!{"bfeat is set"} + "#, + ) + .build(); + + p.cargo("check --features a/f1") + .with_status(101) + .with_stderr( 
+ "\ +[ERROR] --features is not allowed in the root of a virtual workspace +[NOTE] while this was previously accepted, it didn't actually do anything +", + ) + .run(); + + p.cargo("check -p a -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]f1 is set[..]") + .with_stderr_does_not_contain("[..]f2 is set[..]") + .with_stderr_does_not_contain("[..]b is set[..]") + .run(); + + p.cargo("check -p a --features a/f1 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]f1 is set[..]") + .with_stderr_does_not_contain("[..]f2 is set[..]") + .with_stderr_does_not_contain("[..]b is set[..]") + .run(); + + p.cargo("check -p a --features a/f2 -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]f1 is set[..]") + .with_stderr_contains("[..]f2 is set[..]") + .with_stderr_does_not_contain("[..]b is set[..]") + .run(); + + p.cargo("check -p a --features b/bfeat -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]bfeat is set[..]") + .run(); + + p.cargo("check -p a --no-default-features -Zpackage-features") + .masquerade_as_nightly_cargo() + .run(); + + p.cargo("check -p a --no-default-features --features b -Zpackage-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr_contains("[..]b is set[..]") + .run(); +} + +#[cargo_test] +fn non_member() { + // -p for a non-member + Package::new("dep", "1.0.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + dep = "1.0" + + [features] + f1 = [] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build -Zpackage-features -p dep --features f1") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "[UPDATING][..]\n[ERROR] cannot specify features for packages outside of workspace", + ) + .run(); + + p.cargo("build -Zpackage-features -p dep --all-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr("[ERROR] cannot specify features for packages outside of workspace") + .run(); + + p.cargo("build -Zpackage-features -p dep --no-default-features") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr("[ERROR] cannot specify features for packages outside of workspace") + .run(); + + p.cargo("build -Zpackage-features -p dep") + .masquerade_as_nightly_cargo() + .with_stderr( + "\ +[DOWNLOADING] [..] +[DOWNLOADED] [..] +[COMPILING] dep [..] +[FINISHED] [..] +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/package.rs cargo-0.47.0/tests/testsuite/package.rs --- cargo-0.44.1/tests/testsuite/package.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/package.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,15 +1,13 @@ //! Tests for the `cargo package` command. 
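Several tests below in this file (`package_restricted_windows`, `reserved_windows_name`) revolve around file names that Windows reserves, such as `con`, `aux`, and `nul`. A rough standalone check for such names, illustrative only and not Cargo's implementation:

    // Illustrative only, not Cargo's implementation: does a file name collide
    // with one of the device names reserved by Windows? The stem before the
    // first `.` is what matters (`aux.rs` is just as reserved as `aux`).
    fn is_windows_reserved(name: &str) -> bool {
        let stem = name.split('.').next().unwrap_or("").to_ascii_lowercase();
        const RESERVED: &[&str] = &[
            "con", "prn", "aux", "nul",
            "com1", "com2", "com3", "com4", "com5", "com6", "com7", "com8", "com9",
            "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9",
        ];
        RESERVED.contains(&stem.as_str())
    }

    fn main() {
        assert!(is_windows_reserved("aux.rs"));
        assert!(is_windows_reserved("NUL"));
        assert!(!is_windows_reserved("auxiliary.rs"));
        println!("ok");
    }
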
-use std::fs::{read_to_string, File}; -use std::io::prelude::*; -use std::path::Path; - use cargo_test_support::paths::CargoPathExt; use cargo_test_support::publish::validate_crate_contents; use cargo_test_support::registry::{self, Package}; use cargo_test_support::{ basic_manifest, cargo_process, git, path2url, paths, project, symlink_supported, t, }; +use std::fs::{self, read_to_string, File}; +use std::path::Path; #[cargo_test] fn simple() { @@ -560,18 +558,10 @@ #[cargo_test] fn no_duplicates_from_modified_tracked_files() { - let root = paths::root().join("all"); - let p = git::repo(&root) - .file("Cargo.toml", &basic_manifest("foo", "0.0.1")) - .file("src/main.rs", "fn main() {}") - .build(); - File::create(p.root().join("src/main.rs")) - .unwrap() - .write_all(br#"fn main() { println!("A change!"); }"#) - .unwrap(); - cargo_process("build").cwd(p.root()).run(); - cargo_process("package --list --allow-dirty") - .cwd(p.root()) + let p = git::new("all", |p| p.file("src/main.rs", "fn main() {}")); + p.change_file("src/main.rs", r#"fn main() { println!("A change!"); }"#); + p.cargo("build").run(); + p.cargo("package --list --allow-dirty") .with_stdout( "\ Cargo.lock @@ -669,17 +659,7 @@ p.cargo("package").run(); // Add another source file - let mut file = File::create(p.root().join("src").join("foo.rs")).unwrap_or_else(|e| { - panic!( - "could not create file {}: {}", - p.root().join("src/foo.rs").display(), - e - ) - }); - - file.write_all(br#"fn main() { println!("foo"); }"#) - .unwrap(); - std::mem::drop(file); + p.change_file("src/foo.rs", r#"fn main() { println!("foo"); }"#); // Check that cargo rebuilds the tarball p.cargo("package") @@ -816,7 +796,7 @@ homepage = "foo" repository = "foo" # change - "#, + "#, ); p.cargo("package") @@ -874,18 +854,7 @@ let f = File::open(&p.root().join("target/package/foo-0.0.1.crate")).unwrap(); let rewritten_toml = format!( - r#"# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - + r#"{} [package] name = "foo" version = "0.0.1" @@ -909,6 +878,7 @@ [dependencies.ghi] version = "1.0" "#, + cargo::core::package::MANIFEST_PREAMBLE, registry::alt_registry_url() ); @@ -955,28 +925,20 @@ p.cargo("package --no-verify").cwd("bar").run(); let f = File::open(&p.root().join("target/package/bar-0.1.0.crate")).unwrap(); - let rewritten_toml = r#"# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - + let rewritten_toml = format!( + r#"{} [package] name = "bar" version = "0.1.0" authors = [] -"#; +"#, + cargo::core::package::MANIFEST_PREAMBLE + ); validate_crate_contents( f, "bar-0.1.0.crate", &["Cargo.toml", "Cargo.toml.orig", "src/lib.rs"], - &[("Cargo.toml", rewritten_toml)], + &[("Cargo.toml", &rewritten_toml)], ); } @@ -1087,6 +1049,38 @@ } #[cargo_test] +fn test_edition_from_the_future() { + let p = project() + .file( + "Cargo.toml", + r#"[package] + edition = "2038" + name = "foo" + version = "99.99.99" + authors = [] + "#, + ) + .file("src/main.rs", r#""#) + .build(); + + p.cargo("build") + .with_status(101) + .with_stderr( + "\ +error: failed to parse manifest at `[..]` + +Caused by: + failed to parse the `edition` key + +Caused by: + this version of Cargo is older than the `2038` edition, and only supports `2015` and `2018` editions. +" + .to_string(), + ) + .run(); +} + +#[cargo_test] fn do_not_package_if_src_was_modified() { let p = project() .file("src/main.rs", r#"fn main() { println!("hello"); }"#) @@ -1118,14 +1112,14 @@ Caused by: Source directory was modified by build.rs during cargo publish. \ -Build scripts should not modify anything outside of OUT_DIR. -Changed: [CWD]/target/package/foo-0.0.1/bar.txt -Added: [CWD]/target/package/foo-0.0.1/new-dir -[CWD]/target/package/foo-0.0.1/src/generated.txt -Removed: [CWD]/target/package/foo-0.0.1/dir -[CWD]/target/package/foo-0.0.1/dir/foo.txt + Build scripts should not modify anything outside of OUT_DIR. + Changed: [CWD]/target/package/foo-0.0.1/bar.txt + Added: [CWD]/target/package/foo-0.0.1/new-dir + [CWD]/target/package/foo-0.0.1/src/generated.txt + Removed: [CWD]/target/package/foo-0.0.1/dir + [CWD]/target/package/foo-0.0.1/dir/foo.txt -To proceed despite this, pass the `--no-verify` flag.", + To proceed despite this, pass the `--no-verify` flag.", ) .run(); @@ -1663,3 +1657,261 @@ let orig = read_to_string(p.root().join("target/package/foo-1.0.0/Cargo.toml.orig")).unwrap(); assert!(orig.contains("license-file = \"../LICENSE\"")); } + +#[cargo_test] +#[cfg(not(windows))] // Don't want to create invalid files on Windows. +fn package_restricted_windows() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + license = "MIT" + description = "foo" + homepage = "foo" + "#, + ) + .file("src/lib.rs", "pub mod con;\npub mod aux;") + .file("src/con.rs", "pub fn f() {}") + .file("src/aux/mod.rs", "pub fn f() {}") + .build(); + + p.cargo("package") + .with_stderr( + "\ +[WARNING] file src/aux/mod.rs is a reserved Windows filename, it will not work on Windows platforms +[WARNING] file src/con.rs is a reserved Windows filename, it will not work on Windows platforms +[PACKAGING] foo [..] +[VERIFYING] foo [..] +[COMPILING] foo [..] +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn finds_git_in_parent() { + // Test where `Cargo.toml` is not in the root of the git repo. 
+ let repo_path = paths::root().join("repo"); + fs::create_dir(&repo_path).unwrap(); + let p = project() + .at("repo/foo") + .file("Cargo.toml", &basic_manifest("foo", "0.1.0")) + .file("src/lib.rs", "") + .build(); + let repo = git::init(&repo_path); + git::add(&repo); + git::commit(&repo); + p.change_file("ignoreme", ""); + p.change_file("ignoreme2", ""); + p.cargo("package --list --allow-dirty") + .with_stdout( + "\ +Cargo.toml +Cargo.toml.orig +ignoreme +ignoreme2 +src/lib.rs +", + ) + .run(); + + p.change_file(".gitignore", "ignoreme"); + p.cargo("package --list --allow-dirty") + .with_stdout( + "\ +.gitignore +Cargo.toml +Cargo.toml.orig +ignoreme2 +src/lib.rs +", + ) + .run(); + + fs::write(repo_path.join(".gitignore"), "ignoreme2").unwrap(); + p.cargo("package --list --allow-dirty") + .with_stdout( + "\ +.gitignore +Cargo.toml +Cargo.toml.orig +src/lib.rs +", + ) + .run(); +} + +#[cargo_test] +#[cfg(windows)] +fn reserved_windows_name() { + Package::new("bar", "1.0.0") + .file("src/lib.rs", "pub mod aux;") + .file("src/aux.rs", "") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + license = "MIT" + description = "foo" + + [dependencies] + bar = "1.0.0" + "#, + ) + .file("src/main.rs", "extern crate bar;\nfn main() { }") + .build(); + p.cargo("package") + .with_status(101) + .with_stderr_contains( + "\ +error: failed to verify package tarball + +Caused by: + failed to download replaced source registry `[..]` + +Caused by: + failed to unpack package `[..] `[..]`)` + +Caused by: + failed to unpack entry at `[..]aux.rs` + +Caused by: + `[..]aux.rs` appears to contain a reserved Windows path, it cannot be extracted on Windows + +Caused by: + failed to unpack `[..]aux.rs` + +Caused by: + failed to unpack `[..]aux.rs` into `[..]aux.rs`", + ) + .run(); +} + +#[cargo_test] +fn list_with_path_and_lock() { + // Allow --list even for something that isn't packageable. + + // Init an empty registry because a versionless path dep will search for + // the package on crates.io. + registry::init(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + license = "MIT" + description = "foo" + homepage = "foo" + + [dependencies] + bar = {path="bar"} + "#, + ) + .file("src/main.rs", "fn main() {}") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("package --list") + .with_stdout( + "\ +Cargo.lock +Cargo.toml +Cargo.toml.orig +src/main.rs +", + ) + .run(); + + p.cargo("package") + .with_status(101) + .with_stderr( + "\ +error: all path dependencies must have a version specified when packaging. +dependency `bar` does not specify a version. +", + ) + .run(); +} + +#[cargo_test] +fn long_file_names() { + // Filenames over 100 characters require a GNU extension tarfile. + // See #8453. + + registry::init(); + let long_name = concat!( + "012345678901234567890123456789012345678901234567890123456789", + "012345678901234567890123456789012345678901234567890123456789", + "012345678901234567890123456789012345678901234567890123456789" + ); + if cfg!(windows) { + // Long paths on Windows require a special registry entry that is + // disabled by default (even on Windows 10). + // https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file + // If the directory where Cargo runs happens to be more than 80 characters + // long, then it will bump into this limit. 
+ // + // First create a directory to account for various paths Cargo will + // be using in the target directory (such as "target/package/foo-0.1.0"). + let test_path = paths::root().join("test-dir-probe-long-path-support"); + test_path.mkdir_p(); + let test_path = test_path.join(long_name); + if let Err(e) = File::create(&test_path) { + use std::io::Write; + writeln!( + std::io::stderr(), + "\nSkipping long_file_names test, this OS or filesystem does not \ + appear to support long file paths: {:?}\n{:?}", + e, + test_path + ) + .unwrap(); + return; + } + } + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + license = "MIT" + description = "foo" + homepage = "foo" + + [dependencies] + "#, + ) + .file(long_name, "something") + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("package").run(); + p.cargo("package --list") + .with_stdout(&format!( + "\ +{} +Cargo.lock +Cargo.toml +Cargo.toml.orig +src/main.rs +", + long_name + )) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/patch.rs cargo-0.47.0/tests/testsuite/patch.rs --- cargo-0.44.1/tests/testsuite/patch.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/patch.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,12 +1,10 @@ //! Tests for `[patch]` table source replacement. -use std::fs::{self, File}; -use std::io::{Read, Write}; - use cargo_test_support::git; use cargo_test_support::paths; use cargo_test_support::registry::Package; -use cargo_test_support::{basic_manifest, project, t}; +use cargo_test_support::{basic_manifest, project}; +use std::fs; #[cargo_test] fn replace() { @@ -260,11 +258,7 @@ .run(); // unused patch should be in the lock file - let mut lock = String::new(); - File::open(p.root().join("Cargo.lock")) - .unwrap() - .read_to_string(&mut lock) - .unwrap(); + let lock = p.read_lockfile(); let toml: toml::Value = toml::from_str(&lock).unwrap(); assert_eq!(toml["patch"]["unused"].as_array().unwrap().len(), 1); assert_eq!(toml["patch"]["unused"][0]["name"].as_str(), Some("bar")); @@ -373,8 +367,9 @@ .run(); p.cargo("build").with_stderr("[FINISHED] [..]").run(); - t!(t!(File::create(p.root().join("Cargo.toml"))).write_all( - br#" + p.change_file( + "Cargo.toml", + r#" [package] name = "foo" version = "0.0.1" @@ -385,8 +380,8 @@ [patch.crates-io] bar = { path = 'bar' } - "# - )); + "#, + ); p.cargo("build") .with_stderr( @@ -436,8 +431,9 @@ .run(); p.cargo("build").with_stderr("[FINISHED] [..]").run(); - t!(t!(File::create(p.root().join("Cargo.toml"))).write_all( - br#" + p.change_file( + "Cargo.toml", + r#" [package] name = "foo" version = "0.0.1" @@ -448,8 +444,8 @@ [patch.crates-io] bar = { path = 'bar' } - "# - )); + "#, + ); p.cargo("build") .with_stderr( @@ -660,8 +656,9 @@ .with_stderr("[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]") .run(); - t!(t!(File::create(p.root().join("Cargo.toml"))).write_all( - br#" + p.change_file( + "Cargo.toml", + r#" [package] name = "foo" version = "0.0.1" @@ -669,8 +666,8 @@ [dependencies] bar = "0.2.0" - "# - )); + "#, + ); p.cargo("build") .with_stderr( "\ @@ -824,45 +821,31 @@ // Generate a lock file where `foo` is unused p.cargo("build").run(); - let mut lock_file1 = String::new(); - File::open(p.root().join("Cargo.lock")) - .unwrap() - .read_to_string(&mut lock_file1) - .unwrap(); + let lock_file1 = p.read_lockfile(); // Remove `foo` and generate a new lock file form the old one - File::create(p.root().join("Cargo.toml")) - .unwrap() - .write_all( - br#" - [package] - name = "foo" - 
version = "0.0.1" - authors = [] - - [dependencies] - bar = "0.1" - - [patch.crates-io] - bar = { path = 'bar' } - "#, - ) - .unwrap(); + p.change_file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = { path = 'bar' } + "#, + ); p.cargo("build").run(); - let mut lock_file2 = String::new(); - File::open(p.root().join("Cargo.lock")) - .unwrap() - .read_to_string(&mut lock_file2) - .unwrap(); + let lock_file2 = p.read_lockfile(); // Remove the lock file and build from scratch fs::remove_file(p.root().join("Cargo.lock")).unwrap(); p.cargo("build").run(); - let mut lock_file3 = String::new(); - File::open(p.root().join("Cargo.lock")) - .unwrap() - .read_to_string(&mut lock_file3) - .unwrap(); + let lock_file3 = p.read_lockfile(); assert!(lock_file1.contains("foo")); assert_eq!(lock_file2, lock_file3); @@ -1462,3 +1445,570 @@ p.cargo("build").with_stderr("[FINISHED] [..]").run(); p.cargo("build").with_stderr("[FINISHED] [..]").run(); } + +#[cargo_test] +fn update_unused_new_version() { + // If there is an unused patch entry, and then you update the patch, + // make sure `cargo update` will be able to fix the lock file. + Package::new("bar", "0.1.5").publish(); + + // Start with a lock file to 0.1.5, and an "unused" patch because the + // version is too old. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.1" + + [dependencies] + bar = "0.1.5" + + [patch.crates-io] + bar = { path = "../bar" } + "#, + ) + .file("src/lib.rs", "") + .build(); + + // Patch is too old. + let bar = project() + .at("bar") + .file("Cargo.toml", &basic_manifest("bar", "0.1.4")) + .file("src/lib.rs", "") + .build(); + + p.cargo("build") + .with_stderr_contains("[WARNING] Patch `bar v0.1.4 [..] was not used in the crate graph.") + .run(); + // unused patch should be in the lock file + let lock = p.read_lockfile(); + let toml: toml::Value = toml::from_str(&lock).unwrap(); + assert_eq!(toml["patch"]["unused"].as_array().unwrap().len(), 1); + assert_eq!(toml["patch"]["unused"][0]["name"].as_str(), Some("bar")); + assert_eq!( + toml["patch"]["unused"][0]["version"].as_str(), + Some("0.1.4") + ); + + // Oh, OK, let's update to the latest version. + bar.change_file("Cargo.toml", &basic_manifest("bar", "0.1.6")); + + // Create a backup so we can test it with different options. + fs::copy(p.root().join("Cargo.lock"), p.root().join("Cargo.lock.bak")).unwrap(); + + // Try to build again, this should automatically update Cargo.lock. + p.cargo("build") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[COMPILING] bar v0.1.6 ([..]/bar) +[COMPILING] foo v0.0.1 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); + // This should not update any registry. + p.cargo("build").with_stderr("[FINISHED] [..]").run(); + assert!(!p.read_lockfile().contains("unused")); + + // Restore the lock file, and see if `update` will work, too. + fs::copy(p.root().join("Cargo.lock.bak"), p.root().join("Cargo.lock")).unwrap(); + + // Try `update -p`. + p.cargo("update -p bar") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[ADDING] bar v0.1.6 ([..]/bar) +[REMOVING] bar v0.1.5 +", + ) + .run(); + + // Try with bare `cargo update`. 
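As an aside on the lock-file shape asserted earlier in this test: an unused `[patch]` entry is recorded under `[[patch.unused]]` in `Cargo.lock`, which the test reads back through the `toml` crate. A minimal standalone sketch of that parse, assuming `toml` is available as a dependency:

    // Minimal standalone sketch (assumes the `toml` crate as a dependency):
    // reading the `[[patch.unused]]` entries that an unused patch leaves in
    // Cargo.lock, the same shape the assertions above rely on.
    fn main() {
        let lock = r#"
            [[patch.unused]]
            name = "bar"
            version = "0.1.4"
        "#;
        let value: toml::Value = toml::from_str(lock).unwrap();
        let unused = value["patch"]["unused"].as_array().unwrap();
        assert_eq!(unused.len(), 1);
        assert_eq!(unused[0]["name"].as_str(), Some("bar"));
        assert_eq!(unused[0]["version"].as_str(), Some("0.1.4"));
        println!("unused patches: {}", unused.len());
    }
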
+ fs::copy(p.root().join("Cargo.lock.bak"), p.root().join("Cargo.lock")).unwrap(); + p.cargo("update") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[ADDING] bar v0.1.6 ([..]/bar) +[REMOVING] bar v0.1.5 +", + ) + .run(); +} + +#[cargo_test] +fn too_many_matches() { + // The patch locations has multiple versions that match. + Package::new("bar", "0.1.0").publish(); + Package::new("bar", "0.1.0").alternative(true).publish(); + Package::new("bar", "0.1.1").alternative(true).publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = { version = "0.1", registry = "alternative" } + "#, + ) + .file("src/lib.rs", "") + .build(); + + // Picks 0.1.1, the most recent version. + p.cargo("check") + .with_status(101) + .with_stderr( + "\ +[UPDATING] `[..]/alternative-registry` index +[ERROR] failed to resolve patches for `https://github.com/rust-lang/crates.io-index` + +Caused by: + patch for `bar` in `https://github.com/rust-lang/crates.io-index` failed to resolve + +Caused by: + patch for `bar` in `registry `[..]/alternative-registry`` resolved to more than one candidate + Found versions: 0.1.0, 0.1.1 + Update the patch definition to select only one package. + For example, add an `=` version requirement to the patch definition, such as `version = \"=0.1.1\"`. +", + ) + .run(); +} + +#[cargo_test] +fn no_matches() { + // A patch to a location that does not contain the named package. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = { path = "bar" } + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("abc", "0.1.0")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("check") + .with_status(101) + .with_stderr( + "\ +error: failed to resolve patches for `https://github.com/rust-lang/crates.io-index` + +Caused by: + patch for `bar` in `https://github.com/rust-lang/crates.io-index` failed to resolve + +Caused by: + The patch location `[..]/foo/bar` does not appear to contain any packages matching the name `bar`. +", + ) + .run(); +} + +#[cargo_test] +fn mismatched_version() { + // A patch to a location that has an old version. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1.1" + + [patch.crates-io] + bar = { path = "bar", version = "0.1.1" } + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("check") + .with_status(101) + .with_stderr( + "\ +[ERROR] failed to resolve patches for `https://github.com/rust-lang/crates.io-index` + +Caused by: + patch for `bar` in `https://github.com/rust-lang/crates.io-index` failed to resolve + +Caused by: + The patch location `[..]/foo/bar` contains a `bar` package with version `0.1.0`, \ + but the patch definition requires `^0.1.1`. + Check that the version in the patch location is what you expect, \ + and update the patch definition to match. +", + ) + .run(); +} + +#[cargo_test] +fn patch_walks_backwards() { + // Starting with a locked patch, change the patch so it points to an older version. 
+ Package::new("bar", "0.1.0").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = {path="bar"} + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.1")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[CHECKING] bar v0.1.1 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); + + // Somehow the user changes the version backwards. + p.change_file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")); + + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[CHECKING] bar v0.1.0 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn patch_walks_backwards_restricted() { + // This is the same as `patch_walks_backwards`, but the patch contains a + // `version` qualifier. This is unusual, just checking a strange edge case. + Package::new("bar", "0.1.0").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = {path="bar", version="0.1.1"} + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.1")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[CHECKING] bar v0.1.1 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); + + // Somehow the user changes the version backwards. + p.change_file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")); + + p.cargo("check") + .with_status(101) + .with_stderr( + "\ +error: failed to resolve patches for `https://github.com/rust-lang/crates.io-index` + +Caused by: + patch for `bar` in `https://github.com/rust-lang/crates.io-index` failed to resolve + +Caused by: + The patch location `[..]/foo/bar` contains a `bar` package with version `0.1.0`, but the patch definition requires `^0.1.1`. + Check that the version in the patch location is what you expect, and update the patch definition to match. +", + ) + .run(); +} + +#[cargo_test] +fn patched_dep_new_version() { + // What happens when a patch is locked, and then one of the patched + // dependencies needs to be updated. In this case, the baz requirement + // gets updated from 0.1.0 to 0.1.1. + Package::new("bar", "0.1.0").dep("baz", "0.1.0").publish(); + Package::new("baz", "0.1.0").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = {path="bar"} + "#, + ) + .file("src/lib.rs", "") + .file( + "bar/Cargo.toml", + r#" + [package] + name = "bar" + version = "0.1.0" + + [dependencies] + baz = "0.1" + "#, + ) + .file("bar/src/lib.rs", "") + .build(); + + // Lock everything. + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[DOWNLOADING] crates ... +[DOWNLOADED] baz v0.1.0 [..] +[CHECKING] baz v0.1.0 +[CHECKING] bar v0.1.0 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); + + Package::new("baz", "0.1.1").publish(); + + // Just the presence of the new version should not have changed anything. + p.cargo("check").with_stderr("[FINISHED] [..]").run(); + + // Modify the patch so it requires the new version. 
+ p.change_file( + "bar/Cargo.toml", + r#" + [package] + name = "bar" + version = "0.1.0" + + [dependencies] + baz = "0.1.1" + "#, + ); + + // Should unlock and update cleanly. + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[DOWNLOADING] crates ... +[DOWNLOADED] baz v0.1.1 (registry `[..]/registry`) +[CHECKING] baz v0.1.1 +[CHECKING] bar v0.1.0 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn patch_update_doesnt_update_other_sources() { + // Very extreme edge case, make sure a patch update doesn't update other + // sources. + Package::new("bar", "0.1.0").publish(); + Package::new("bar", "0.1.0").alternative(true).publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + bar_alt = { version = "0.1", registry = "alternative", package = "bar" } + + [patch.crates-io] + bar = { path = "bar" } + "#, + ) + .file("src/lib.rs", "") + .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("check") + .with_stderr_unordered( + "\ +[UPDATING] `[..]/registry` index +[UPDATING] `[..]/alternative-registry` index +[DOWNLOADING] crates ... +[DOWNLOADED] bar v0.1.0 (registry `[..]/alternative-registry`) +[CHECKING] bar v0.1.0 (registry `[..]/alternative-registry`) +[CHECKING] bar v0.1.0 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); + + // Publish new versions in both sources. + Package::new("bar", "0.1.1").publish(); + Package::new("bar", "0.1.1").alternative(true).publish(); + + // Since it is locked, nothing should change. + p.cargo("check").with_stderr("[FINISHED] [..]").run(); + + // Require new version on crates.io. + p.change_file("bar/Cargo.toml", &basic_manifest("bar", "0.1.1")); + + // This should not update bar_alt. + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/registry` index +[CHECKING] bar v0.1.1 ([..]/foo/bar) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); +} + +#[cargo_test] +fn can_update_with_alt_reg() { + // A patch to an alt reg can update. + Package::new("bar", "0.1.0").publish(); + Package::new("bar", "0.1.0").alternative(true).publish(); + Package::new("bar", "0.1.1").alternative(true).publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = { version = "=0.1.1", registry = "alternative" } + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/alternative-registry` index +[UPDATING] `[..]/registry` index +[DOWNLOADING] crates ... +[DOWNLOADED] bar v0.1.1 (registry `[..]/alternative-registry`) +[CHECKING] bar v0.1.1 (registry `[..]/alternative-registry`) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); + + Package::new("bar", "0.1.2").alternative(true).publish(); + + // Should remain locked. + p.cargo("check").with_stderr("[FINISHED] [..]").run(); + + // This does nothing, due to `=` requirement. + p.cargo("update -p bar") + .with_stderr( + "\ +[UPDATING] `[..]/alternative-registry` index +[UPDATING] `[..]/registry` index +", + ) + .run(); + + // Bump to 0.1.2. 
+ p.change_file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "0.1" + + [patch.crates-io] + bar = { version = "=0.1.2", registry = "alternative" } + "#, + ); + + p.cargo("check") + .with_stderr( + "\ +[UPDATING] `[..]/alternative-registry` index +[UPDATING] `[..]/registry` index +[DOWNLOADING] crates ... +[DOWNLOADED] bar v0.1.2 (registry `[..]/alternative-registry`) +[CHECKING] bar v0.1.2 (registry `[..]/alternative-registry`) +[CHECKING] foo v0.1.0 ([..]/foo) +[FINISHED] [..] +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/path.rs cargo-0.47.0/tests/testsuite/path.rs --- cargo-0.44.1/tests/testsuite/path.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/path.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,12 +1,10 @@ //! Tests for `path` dependencies. -use std::fs::{self, File}; -use std::io::prelude::*; - use cargo_test_support::paths::{self, CargoPathExt}; use cargo_test_support::registry::Package; use cargo_test_support::{basic_lib_manifest, basic_manifest, main_file, project}; use cargo_test_support::{sleep_ms, t}; +use std::fs; #[cargo_test] // I have no idea why this is failing spuriously on Windows; @@ -359,10 +357,7 @@ // We base recompilation off mtime, so sleep for at least a second to ensure // that this write will change the mtime. sleep_ms(1000); - File::create(&p.root().join("baz/src/baz.rs")) - .unwrap() - .write_all(br#"pub fn baz() { println!("hello!"); }"#) - .unwrap(); + p.change_file("baz/src/baz.rs", r#"pub fn baz() { println!("hello!"); }"#); sleep_ms(1000); p.cargo("build") .with_stderr( @@ -376,15 +371,13 @@ // Make sure an update to bar doesn't trigger baz sleep_ms(1000); - File::create(&p.root().join("bar/src/bar.rs")) - .unwrap() - .write_all( - br#" - extern crate baz; - pub fn bar() { println!("hello!"); baz::baz(); } - "#, - ) - .unwrap(); + p.change_file( + "bar/src/bar.rs", + r#" + extern crate baz; + pub fn bar() { println!("hello!"); baz::baz(); } + "#, + ); sleep_ms(1000); p.cargo("build") .with_stderr( @@ -481,10 +474,7 @@ .run(); sleep_ms(1000); - File::create(&p.root().join("src/main.rs")) - .unwrap() - .write_all(br#"fn main() {}"#) - .unwrap(); + p.change_file("src/main.rs", r#"fn main() {}"#); // This shouldn't recompile `bar` p.cargo("build") @@ -548,10 +538,7 @@ .build(); fs::create_dir(&paths::root().join(".cargo")).unwrap(); - File::create(&paths::root().join(".cargo/config")) - .unwrap() - .write_all(br#"paths = ["bar"]"#) - .unwrap(); + fs::write(&paths::root().join(".cargo/config"), r#"paths = ["bar"]"#).unwrap(); let p = project() .file( @@ -725,12 +712,7 @@ p.process(&p.bin("foo")).with_stdout("0\n").run(); // Touching bar.rs.in should cause the `build` command to run again. - { - let file = fs::File::create(&p.root().join("bar/src/bar.rs.in")); - file.unwrap() - .write_all(br#"pub fn gimme() -> i32 { 1 }"#) - .unwrap(); - } + p.change_file("bar/src/bar.rs.in", "pub fn gimme() -> i32 { 1 }"); p.cargo("build") .with_stderr( @@ -963,20 +945,18 @@ p.cargo("build").run(); // Change the dependency on `bar` to an invalid path - File::create(&p.root().join("foo/Cargo.toml")) - .unwrap() - .write_all( - br#" - [project] - name = "foo" - version = "0.5.0" - authors = [] + p.change_file( + "foo/Cargo.toml", + r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] - [dependencies] - bar = { path = "" } - "#, - ) - .unwrap(); + [dependencies] + bar = { path = "" } + "#, + ); // Make sure we get a nice error. In the past this actually stack // overflowed! 
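A recurring refactor in the new.rs, patch.rs, and path.rs hunks above replaces manual `File::open`/`read_to_string` and `File::create`/`write_all` pairs with the one-call `std::fs` helpers (or the test-support `change_file`). The equivalent standard-library idiom, shown on a throwaway file name chosen for illustration:

    // Standard-library equivalents of the refactor applied throughout these
    // tests: write a whole file from a string, read it back into a String.
    // The file name here is a throwaway chosen for illustration.
    use std::fs;
    use std::io;

    fn main() -> io::Result<()> {
        fs::write("example-scratch.toml", "[package]\nname = \"example\"\n")?;
        let contents = fs::read_to_string("example-scratch.toml")?;
        assert!(contents.contains("name = \"example\""));
        fs::remove_file("example-scratch.toml")?;
        Ok(())
    }
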
diff -Nru cargo-0.44.1/tests/testsuite/plugins.rs cargo-0.47.0/tests/testsuite/plugins.rs --- cargo-0.44.1/tests/testsuite/plugins.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/plugins.rs 2020-07-17 20:39:39.000000000 +0000 @@ -180,7 +180,7 @@ let src = root.join(&file); let dst = out_dir.join(&file); fs::copy(src, dst).unwrap(); - if cfg!(windows) { + if cfg!(target_env = "msvc") { fs::copy(root.join("builder.dll.lib"), out_dir.join("builder.dll.lib")).unwrap(); } @@ -435,5 +435,5 @@ .file("baz/src/lib.rs", "") .build(); - p.cargo("build").run(); + p.cargo("build -v").run(); } diff -Nru cargo-0.44.1/tests/testsuite/proc_macro.rs cargo-0.47.0/tests/testsuite/proc_macro.rs --- cargo-0.44.1/tests/testsuite/proc_macro.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/proc_macro.rs 2020-07-17 20:39:39.000000000 +0000 @@ -470,3 +470,72 @@ p.cargo("test").run(); p.cargo("doc").run(); } + +#[cargo_test] +fn proc_macro_built_once() { + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ['a', 'b'] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [build-dependencies] + the-macro = { path = '../the-macro' } + "#, + ) + .file("a/build.rs", "fn main() {}") + .file("a/src/main.rs", "fn main() {}") + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [dependencies] + the-macro = { path = '../the-macro', features = ['a'] } + "#, + ) + .file("b/src/main.rs", "fn main() {}") + .file( + "the-macro/Cargo.toml", + r#" + [package] + name = "the-macro" + version = "0.1.0" + + [lib] + proc_macro = true + + [features] + a = [] + "#, + ) + .file("the-macro/src/lib.rs", "") + .build(); + p.cargo("build -Zfeatures=all --verbose") + .masquerade_as_nightly_cargo() + .with_stderr_unordered( + "\ +[COMPILING] the-macro [..] +[RUNNING] `rustc --crate-name the_macro [..]` +[COMPILING] b [..] +[RUNNING] `rustc --crate-name b [..]` +[COMPILING] a [..] +[RUNNING] `rustc --crate-name build_script_build [..]` +[RUNNING] `[..]build[..]script[..]build[..]` +[RUNNING] `rustc --crate-name a [..]` +[FINISHED] [..] +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/profile_config.rs cargo-0.47.0/tests/testsuite/profile_config.rs --- cargo-0.44.1/tests/testsuite/profile_config.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/profile_config.rs 2020-07-17 20:39:39.000000000 +0000 @@ -27,7 +27,7 @@ Caused by: feature `named-profiles` is required -consider adding `cargo-features = [\"named-profiles\"]` to the manifest + consider adding `cargo-features = [\"named-profiles\"]` to the manifest ", ) .with_status(101) @@ -283,7 +283,7 @@ .with_stderr( "\ [COMPILING] bar [..] -[RUNNING] `rustc --crate-name bar [..] -C opt-level=2 -C codegen-units=2 [..] +[RUNNING] `rustc --crate-name bar [..] -C opt-level=2[..]-C codegen-units=2 [..] [COMPILING] foo [..] [RUNNING] `rustc --crate-name foo [..]-C codegen-units=2 [..] 
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]", @@ -346,7 +346,8 @@ use cargo::core::enable_nightly_features; use cargo::core::features::Features; use cargo::core::profiles::{Profiles, UnitFor}; - use cargo::core::{InternedString, PackageId}; + use cargo::core::PackageId; + use cargo::util::interning::InternedString; use cargo::util::toml::TomlProfiles; use std::fs; enable_nightly_features(); @@ -405,7 +406,8 @@ let dep_pkg = PackageId::new("dep", "0.1.0", crates_io).unwrap(); // normal package - let p = profiles.get_profile(a_pkg, true, UnitFor::new_normal(), CompileMode::Build); + let mode = CompileMode::Build; + let p = profiles.get_profile(a_pkg, true, true, UnitFor::new_normal(), mode); assert_eq!(p.name, "foo"); assert_eq!(p.codegen_units, Some(2)); // "foo" from config assert_eq!(p.opt_level, "1"); // "middle" from manifest @@ -414,7 +416,7 @@ assert_eq!(p.overflow_checks, true); // "dev" built-in (ignore package override) // build-override - let bo = profiles.get_profile(a_pkg, true, UnitFor::new_build(false), CompileMode::Build); + let bo = profiles.get_profile(a_pkg, true, true, UnitFor::new_host(false), mode); assert_eq!(bo.name, "foo"); assert_eq!(bo.codegen_units, Some(6)); // "foo" build override from config assert_eq!(bo.opt_level, "1"); // SAME as normal @@ -423,7 +425,7 @@ assert_eq!(bo.overflow_checks, true); // SAME as normal // package overrides - let po = profiles.get_profile(dep_pkg, false, UnitFor::new_normal(), CompileMode::Build); + let po = profiles.get_profile(dep_pkg, false, true, UnitFor::new_normal(), mode); assert_eq!(po.name, "foo"); assert_eq!(po.codegen_units, Some(7)); // "foo" package override from config assert_eq!(po.opt_level, "1"); // SAME as normal diff -Nru cargo-0.44.1/tests/testsuite/profile_overrides.rs cargo-0.47.0/tests/testsuite/profile_overrides.rs --- cargo-0.44.1/tests/testsuite/profile_overrides.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/profile_overrides.rs 2020-07-17 20:39:39.000000000 +0000 @@ -215,17 +215,17 @@ p.cargo("build -v").with_stderr_unordered("\ [COMPILING] m3 [..] [COMPILING] dep [..] -[RUNNING] `rustc --crate-name m3 m3/src/lib.rs [..] --crate-type lib --emit=[..]link -C codegen-units=4 [..] -[RUNNING] `rustc --crate-name dep [..]dep/src/lib.rs [..] --crate-type lib --emit=[..]link -C codegen-units=3 [..] -[RUNNING] `rustc --crate-name m3 m3/src/lib.rs [..] --crate-type lib --emit=[..]link -C codegen-units=1 [..] -[RUNNING] `rustc --crate-name build_script_build m1/build.rs [..] --crate-type bin --emit=[..]link -C codegen-units=4 [..] +[RUNNING] `rustc --crate-name m3 m3/src/lib.rs [..] --crate-type lib --emit=[..]link[..]-C codegen-units=4 [..] +[RUNNING] `rustc --crate-name dep [..]dep/src/lib.rs [..] --crate-type lib --emit=[..]link[..]-C codegen-units=3 [..] +[RUNNING] `rustc --crate-name m3 m3/src/lib.rs [..] --crate-type lib --emit=[..]link[..]-C codegen-units=1 [..] +[RUNNING] `rustc --crate-name build_script_build m1/build.rs [..] --crate-type bin --emit=[..]link[..]-C codegen-units=4 [..] [COMPILING] m2 [..] -[RUNNING] `rustc --crate-name build_script_build m2/build.rs [..] --crate-type bin --emit=[..]link -C codegen-units=2 [..] +[RUNNING] `rustc --crate-name build_script_build m2/build.rs [..] --crate-type bin --emit=[..]link[..]-C codegen-units=2 [..] [RUNNING] `[..]/m1-[..]/build-script-build` [RUNNING] `[..]/m2-[..]/build-script-build` -[RUNNING] `rustc --crate-name m2 m2/src/lib.rs [..] --crate-type lib --emit=[..]link -C codegen-units=2 [..] 
+[RUNNING] `rustc --crate-name m2 m2/src/lib.rs [..] --crate-type lib --emit=[..]link[..]-C codegen-units=2 [..] [COMPILING] m1 [..] -[RUNNING] `rustc --crate-name m1 m1/src/lib.rs [..] --crate-type lib --emit=[..]link -C codegen-units=1 [..] +[RUNNING] `rustc --crate-name m1 m1/src/lib.rs [..] --crate-type lib --emit=[..]link[..]-C codegen-units=1 [..] [FINISHED] dev [unoptimized + debuginfo] [..] ", ) diff -Nru cargo-0.44.1/tests/testsuite/profiles.rs cargo-0.47.0/tests/testsuite/profiles.rs --- cargo-0.44.1/tests/testsuite/profiles.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/profiles.rs 2020-07-17 20:39:39.000000000 +0000 @@ -2,7 +2,7 @@ use std::env; -use cargo_test_support::project; +use cargo_test_support::{is_nightly, project}; #[cargo_test] fn profile_overrides() { @@ -29,8 +29,8 @@ "\ [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ - --emit=[..]link \ - -C opt-level=1 \ + --emit=[..]link[..]\ + -C opt-level=1[..]\ -C debug-assertions=on \ -C metadata=[..] \ -C rpath \ @@ -65,7 +65,7 @@ "\ [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ - --emit=[..]link \ + --emit=[..]link[..]\ -C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ @@ -98,7 +98,7 @@ "\ [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ - --emit=[..]link \ + --emit=[..]link[..]\ -C debuginfo=1 \ -C metadata=[..] \ --out-dir [..] \ @@ -135,7 +135,7 @@ [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ --emit=[..]link \ - -C opt-level={level} \ + -C opt-level={level}[..]\ -C debuginfo=2 \ -C debug-assertions=on \ -C metadata=[..] \ @@ -210,7 +210,7 @@ --crate-type dylib --crate-type rlib \ --emit=[..]link \ -C prefer-dynamic \ - -C opt-level=1 \ + -C opt-level=1[..]\ -C debuginfo=2 \ -C metadata=[..] \ --out-dir [CWD]/target/release/deps \ @@ -218,7 +218,7 @@ [COMPILING] test v0.0.0 ([CWD]) [RUNNING] `rustc --crate-name test src/lib.rs [..]--crate-type lib \ --emit=[..]link \ - -C opt-level=1 \ + -C opt-level=1[..]\ -C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ @@ -467,4 +467,114 @@ ", ) .run(); +} + +#[cargo_test] +// Strip doesn't work on macos. +#[cfg_attr(target_os = "macos", ignore)] +fn strip_works() { + if !is_nightly() { + return; + } + + let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["strip"] + + [package] + name = "foo" + version = "0.1.0" + + [profile.release] + strip = 'symbols' + "#, + ) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("build --release -v") + .masquerade_as_nightly_cargo() + .with_stderr( + "\ +[COMPILING] foo [..] +[RUNNING] `rustc [..] -Z strip=symbols [..]` +[FINISHED] [..] 
+", + ) + .run(); +} + +#[cargo_test] +fn strip_requires_cargo_feature() { + if !is_nightly() { + return; + } + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [profile.release] + strip = 'symbols' + "#, + ) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("build --release -v") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "\ +[ERROR] failed to parse manifest at `[CWD]/Cargo.toml` + +Caused by: + feature `strip` is required + + consider adding `cargo-features = [\"strip\"]` to the manifest +", + ) + .run(); +} +#[cargo_test] +fn strip_rejects_invalid_option() { + if !is_nightly() { + return; + } + + let p = project() + .file( + "Cargo.toml", + r#" + cargo-features = ["strip"] + + [package] + name = "foo" + version = "0.1.0" + + [profile.release] + strip = 'wrong' + "#, + ) + .file("src/main.rs", "fn main() {}") + .build(); + + p.cargo("build --release -v") + .masquerade_as_nightly_cargo() + .with_status(101) + .with_stderr( + "\ +[ERROR] failed to parse manifest at `[CWD]/Cargo.toml` + +Caused by: + unknown variant `wrong`, expected one of `debuginfo`, `none`, `symbols` for key [..] +", + ) + .run(); } diff -Nru cargo-0.44.1/tests/testsuite/profile_targets.rs cargo-0.47.0/tests/testsuite/profile_targets.rs --- cargo-0.44.1/tests/testsuite/profile_targets.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/profile_targets.rs 2020-07-17 20:39:39.000000000 +0000 @@ -89,16 +89,16 @@ // - build_script_build is built without panic because it thinks `build.rs` is a plugin. p.cargo("build -vv").masquerade_as_nightly_cargo().with_stderr_unordered("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [RUNNING] `[..]/target/debug/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=debug DEBUG=true OPT_LEVEL=0 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] 
rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] [FINISHED] dev [unoptimized + debuginfo] [..] ").run(); p.cargo("build -vv") @@ -121,16 +121,16 @@ // `build --release` p.cargo("build --release -vv").masquerade_as_nightly_cargo().with_stderr_unordered("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [RUNNING] `[..]/target/release/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=release DEBUG=false OPT_LEVEL=3 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] [FINISHED] release [optimized] [..] ").run(); p.cargo("build --release -vv") @@ -179,22 +179,22 @@ // example dev build p.cargo("build --all-targets -vv").masquerade_as_nightly_cargo().with_stderr_unordered(format!("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] 
rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [RUNNING] `[..]/target/debug/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=debug DEBUG=true OPT_LEVEL=0 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..]` -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C codegen-units={affected} -C debuginfo=2 --test [..]` -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..]` -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C codegen-units={affected} -C debuginfo=2 --test [..]` -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link -C codegen-units={affected} -C debuginfo=2 --test [..]` -[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]link -C codegen-units={affected} -C debuginfo=2 --test [..]` -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..]` -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C panic=abort -C codegen-units=1 -C debuginfo=2 [..]` +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..]` +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 --test [..]` +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..]` +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 --test [..]` +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 --test [..]` +[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 --test [..]` +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..]` +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..]` [FINISHED] dev [unoptimized + debuginfo] [..] ", affected=affected)).run(); p.cargo("build -vv") @@ -246,22 +246,22 @@ // example release build p.cargo("build --all-targets --release -vv").masquerade_as_nightly_cargo().with_stderr_unordered(format!("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] 
rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [RUNNING] `[..]/target/release/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=release DEBUG=false OPT_LEVEL=3 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..]` -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..]` -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..]` -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..]` -[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..]` -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..]` -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..]` -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..]` +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..]` +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..]` +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..]` +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..]` +[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..]` +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..]` +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..]` +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..]` [FINISHED] release [optimized] [..] ", affected=affected)).run(); p.cargo("build --all-targets --release -vv") @@ -304,21 +304,21 @@ // p.cargo("test -vv").masquerade_as_nightly_cargo().with_stderr_unordered(format!("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] 
rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C codegen-units={affected} -C debuginfo=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] [RUNNING] `[..]/target/debug/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=debug DEBUG=true OPT_LEVEL=0 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort -C codegen-units={affected} -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C codegen-units=3 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link -C codegen-units=3 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C codegen-units=3 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C panic=abort -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C panic=abort[..]-C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link[..]-C codegen-units=3 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link[..]-C codegen-units=3 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]-C codegen-units=3 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C panic=abort[..]-C codegen-units={affected} -C debuginfo=2 [..] [FINISHED] test [unoptimized + debuginfo] [..] [RUNNING] `[..]/deps/foo-[..]` [RUNNING] `[..]/deps/foo-[..]` @@ -372,21 +372,21 @@ // p.cargo("test --release -vv").masquerade_as_nightly_cargo().with_stderr_unordered(format!("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] 
rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [RUNNING] `[..]/target/release/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=release DEBUG=false OPT_LEVEL=3 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..] -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units={affected} --test [..] -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..] +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units={affected} --test [..] +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] [FINISHED] release [optimized] [..] [RUNNING] `[..]/deps/foo-[..]` [RUNNING] `[..]/deps/foo-[..]` @@ -439,20 +439,20 @@ // p.cargo("bench -vv").masquerade_as_nightly_cargo().with_stderr_unordered(format!("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units={affected} [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units={affected} [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units={affected} [..] +[RUNNING] `[..] 
rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units={affected} [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units={affected} [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units={affected} [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C codegen-units={affected} [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3[..]-C codegen-units={affected} [..] [RUNNING] `[..]target/release/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=release DEBUG=false OPT_LEVEL=3 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units={affected} [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units={affected} [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units=4 --test [..] -[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units=4 --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C opt-level=3 -C codegen-units=4 --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort -C codegen-units={affected} [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units={affected} [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units={affected} [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units=4 --test [..] +[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units=4 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]link -C opt-level=3[..]-C codegen-units=4 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C panic=abort[..]-C codegen-units={affected} [..] [FINISHED] bench [optimized] [..] [RUNNING] `[..]/deps/foo-[..] --bench` [RUNNING] `[..]/deps/foo-[..] --bench` @@ -504,23 +504,23 @@ // p.cargo("check --all-targets -vv").masquerade_as_nightly_cargo().with_stderr_unordered("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] 
rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] bdep[..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [RUNNING] `[..]target/debug/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=debug DEBUG=true OPT_LEVEL=0 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]metadata -C codegen-units=1 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]metadata -C codegen-units=1 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]metadata -C codegen-units=1 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]metadata -C codegen-units=1 -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]metadata -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]metadata -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]metadata[..]-C codegen-units=1 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]metadata[..]-C codegen-units=1 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]metadata[..]-C codegen-units=1 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]metadata[..]-C codegen-units=1 -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]metadata -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]metadata -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] [FINISHED] dev [unoptimized + debuginfo] [..] ").run(); // Starting with Rust 1.27, rustc emits `rmeta` files for bins, so @@ -550,23 +550,23 @@ // `dev` for all targets. p.cargo("check --all-targets --release -vv").masquerade_as_nightly_cargo().with_stderr_unordered("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] 
rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3 -C panic=abort -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] [COMPILING] bdep[..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3 -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C opt-level=3[..]-C codegen-units=2 [..] [RUNNING] `[..]target/release/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=release DEBUG=false OPT_LEVEL=3 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3 -C panic=abort -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3 -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]metadata -C opt-level=3 -C codegen-units=2 --test [..] -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]metadata -C opt-level=3 -C codegen-units=2 --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]metadata -C opt-level=3 -C codegen-units=2 --test [..] -[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]metadata -C opt-level=3 -C codegen-units=2 --test [..] -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]metadata -C opt-level=3 -C panic=abort -C codegen-units=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]metadata -C opt-level=3 -C panic=abort -C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C opt-level=3[..]-C codegen-units=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]metadata -C opt-level=3[..]-C codegen-units=2 --test [..] +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]metadata -C opt-level=3[..]-C codegen-units=2 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]metadata -C opt-level=3[..]-C codegen-units=2 --test [..] +[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]metadata -C opt-level=3[..]-C codegen-units=2 --test [..] +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--crate-type bin --emit=[..]metadata -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] +[RUNNING] `[..] 
rustc --crate-name foo src/main.rs [..]--crate-type bin --emit=[..]metadata -C opt-level=3 -C panic=abort[..]-C codegen-units=2 [..] [FINISHED] release [optimized] [..] ").run(); @@ -611,20 +611,20 @@ // p.cargo("check --all-targets --profile=test -vv").masquerade_as_nightly_cargo().with_stderr_unordered(format!("\ [COMPILING] bar [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 [..] [COMPILING] bdep[..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units={affected} -C debuginfo=2 [..] [RUNNING] `[..]target/debug/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=debug DEBUG=true OPT_LEVEL=0 -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 [..] -[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 --test [..] -[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--emit=[..]metadata -C codegen-units={affected} -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name foo src/lib.rs [..]--emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name test1 tests/test1.rs [..]--emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name foo src/main.rs [..]--emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name bench1 benches/bench1.rs [..]--emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 --test [..] +[RUNNING] `[..] rustc --crate-name ex1 examples/ex1.rs [..]--emit=[..]metadata[..]-C codegen-units={affected} -C debuginfo=2 --test [..] [FINISHED] test [unoptimized + debuginfo] [..] ", affected=affected)).run(); @@ -658,13 +658,13 @@ p.cargo("doc -vv").masquerade_as_nightly_cargo().with_stderr_unordered("\ [COMPILING] bar [..] [DOCUMENTING] bar [..] -[RUNNING] `[..] 
rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [RUNNING] `rustdoc [..]--crate-name bar bar/src/lib.rs [..] -[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C panic=abort -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bar bar/src/lib.rs [..]--crate-type lib --emit=[..]metadata -C panic=abort[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] bdep [..] -[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name bdep bdep/src/lib.rs [..]--crate-type lib --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [COMPILING] foo [..] -[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link -C codegen-units=1 -C debuginfo=2 [..] +[RUNNING] `[..] rustc --crate-name build_script_build build.rs [..]--crate-type bin --emit=[..]link[..]-C codegen-units=1 -C debuginfo=2 [..] [RUNNING] `[..]target/debug/build/foo-[..]/build-script-build` [foo 0.0.1] foo custom build PROFILE=debug DEBUG=true OPT_LEVEL=0 [DOCUMENTING] foo [..] diff -Nru cargo-0.44.1/tests/testsuite/publish_lockfile.rs cargo-0.47.0/tests/testsuite/publish_lockfile.rs --- cargo-0.44.1/tests/testsuite/publish_lockfile.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/publish_lockfile.rs 2020-07-17 20:39:39.000000000 +0000 @@ -469,6 +469,7 @@ "\ [PACKAGING] bar v0.0.1 ([..]) [ARCHIVING] .cargo_vcs_info.json +[ARCHIVING] .gitignore [ARCHIVING] Cargo.lock [ARCHIVING] Cargo.toml [ARCHIVING] Cargo.toml.orig diff -Nru cargo-0.44.1/tests/testsuite/publish.rs cargo-0.47.0/tests/testsuite/publish.rs --- cargo-0.44.1/tests/testsuite/publish.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/publish.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,12 +1,10 @@ //! Tests for the `cargo publish` command. -use std::fs::{self, File}; -use std::io::prelude::*; - use cargo_test_support::git::{self, repo}; use cargo_test_support::paths; use cargo_test_support::registry::{self, registry_path, registry_url, Package}; -use cargo_test_support::{basic_manifest, project, publish}; +use cargo_test_support::{basic_manifest, no_such_file_err_msg, project, publish}; +use std::fs; const CLEAN_FOO_JSON: &str = r#" { @@ -91,8 +89,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish --no-verify --index") - .arg(registry_url().to_string()) + p.cargo("publish --no-verify --token sekrit") .with_stderr(&format!( "\ [UPDATING] `{reg}` index @@ -133,22 +130,23 @@ fs::remove_file(&credentials).unwrap(); // Verify can't publish without a token. 
- p.cargo("publish --no-verify --index") - .arg(registry_url().to_string()) + p.cargo("publish --no-verify") .with_status(101) - .with_stderr_contains("[ERROR] no upload token found, please run `cargo login`") + .with_stderr_contains( + "[ERROR] no upload token found, \ + please run `cargo login` or pass `--token`", + ) .run(); - File::create(&credentials) - .unwrap() - .write_all(br#"token = "api-token""#) - .unwrap(); + fs::write(&credentials, r#"token = "api-token""#).unwrap(); - p.cargo("publish --no-verify --index") - .arg(registry_url().to_string()) + p.cargo("publish --no-verify") .with_stderr(&format!( "\ [UPDATING] `{reg}` index +[WARNING] using `registry.token` config value with source replacement is deprecated +This may become a hard error in the future[..] +Use the --token command-line flag to remove this warning. [WARNING] manifest has no documentation, [..] See [..] [PACKAGING] foo v0.0.1 ([CWD]) @@ -182,7 +180,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish --no-verify --host") + p.cargo("publish --no-verify --token sekrit --host") .arg(registry_url().to_string()) .with_stderr(&format!( "\ @@ -229,7 +227,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish --no-verify --index") + p.cargo("publish --no-verify --token sekrit --index") .arg(registry_url().to_string()) .arg("--host") .arg(registry_url().to_string()) @@ -279,8 +277,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish -v --no-verify --index") - .arg(registry_url().to_string()) + p.cargo("publish -v --no-verify --token sekrit") .with_status(101) .with_stderr( "\ @@ -318,8 +315,7 @@ .file("bar/src/lib.rs", "") .build(); - p.cargo("publish --index") - .arg(registry_url().to_string()) + p.cargo("publish --token sekrit") .with_status(101) .with_stderr( "\ @@ -388,8 +384,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish --index") - .arg(registry_url().to_string()) + p.cargo("publish --token sekrit") .with_status(101) .with_stderr( "\ @@ -429,9 +424,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish --index") - .arg(registry_url().to_string()) - .run(); + p.cargo("publish --token sekrit").run(); validate_upload_foo_clean(); } @@ -460,11 +453,7 @@ .file("bar/src/main.rs", "fn main() {}") .build(); - p.cargo("publish") - .cwd("bar") - .arg("--index") - .arg(registry_url().to_string()) - .run(); + p.cargo("publish --token sekrit").cwd("bar").run(); validate_upload_foo_clean(); } @@ -494,9 +483,7 @@ .file(".gitignore", "baz") .build(); - p.cargo("publish --index") - .arg(registry_url().to_string()) - .run(); + p.cargo("publish --token sekrit").run(); publish::validate_upload( CLEAN_FOO_JSON, @@ -535,11 +522,7 @@ "#, ) .nocommit_file("bar/src/main.rs", "fn main() {}"); - p.cargo("publish") - .cwd("bar") - .arg("--index") - .arg(registry_url().to_string()) - .run(); + p.cargo("publish --token sekrit").cwd("bar").run(); publish::validate_upload( CLEAN_FOO_JSON, @@ -576,8 +559,7 @@ "#, ) .nocommit_file("src/main.rs", "fn main() {}"); - p.cargo("publish --index") - .arg(registry_url().to_string()) + p.cargo("publish --token sekrit") .with_status(101) .with_stderr_contains( "[ERROR] 3 files in the working directory contain \ @@ -826,8 +808,7 @@ ) .build(); - p.cargo("publish --features required --index") - .arg(registry_url().to_string()) + p.cargo("publish --features required --token sekrit") .with_stderr_contains("[UPLOADING] foo v0.0.1 ([CWD])") .run(); } @@ -860,8 +841,7 @@ ) .build(); - p.cargo("publish --all-features --index") 
- .arg(registry_url().to_string()) + p.cargo("publish --all-features --token sekrit") .with_stderr_contains("[UPLOADING] foo v0.0.1 ([CWD])") .run(); } @@ -894,8 +874,7 @@ ) .build(); - p.cargo("publish --no-default-features --index") - .arg(registry_url().to_string()) + p.cargo("publish --no-default-features --token sekrit") .with_stderr_contains("error: This crate requires `required` feature!") .with_status(101) .run(); @@ -936,8 +915,7 @@ p.cargo("build").run(); // Check that verify fails with patched crate which has new functionality. - p.cargo("publish --index") - .arg(registry_url().to_string()) + p.cargo("publish --token sekrit") .with_stderr_contains("[..]newfunc[..]") .with_status(101) .run(); @@ -945,9 +923,7 @@ // Remove the usage of new functionality and try again. p.change_file("src/main.rs", "extern crate bar; pub fn main() {}"); - p.cargo("publish --index") - .arg(registry_url().to_string()) - .run(); + p.cargo("publish --token sekrit").run(); // Note, use of `registry` in the deps here is an artifact that this // publishes to a fake, local registry that is pretending to be crates.io. @@ -1015,7 +991,10 @@ // Assert upload token error before the package is verified p.cargo("publish") .with_status(101) - .with_stderr_contains("[ERROR] no upload token found, please run `cargo login`") + .with_stderr_contains( + "[ERROR] no upload token found, \ + please run `cargo login` or pass `--token`", + ) .with_stderr_does_not_contain("[VERIFYING] foo v0.0.1 ([CWD])") .run(); @@ -1042,7 +1021,7 @@ .file("src/lib.rs", "") .build(); - p.cargo("publish") + p.cargo("publish --token sekrit") .with_status(101) .with_stderr( "\ @@ -1063,7 +1042,7 @@ "#, ); - p.cargo("publish") + p.cargo("publish --token sekrit") .with_status(101) .with_stderr( "\ @@ -1117,9 +1096,7 @@ .build(); p.cargo("run").with_stdout("2").run(); - p.cargo("publish --no-verify --index") - .arg(registry_url().to_string()) - .run(); + p.cargo("publish --no-verify --token sekrit").run(); publish::validate_upload_with_contents( r#" @@ -1208,8 +1185,7 @@ .file("bar/src/lib.rs", "") .build(); - p.cargo("publish --no-verify --index") - .arg(registry_url().to_string()) + p.cargo("publish --no-verify --token sekrit") .with_stderr( "\ [UPDATING] [..] @@ -1284,8 +1260,7 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish --no-verify --index") - .arg(registry_url().to_string()) + p.cargo("publish --no-verify --token sekrit") .with_stderr_contains( "\ [WARNING] Both `[..]/credentials` and `[..]/credentials.toml` exist. Using `[..]/credentials` @@ -1295,3 +1270,114 @@ validate_upload_foo(); } + +#[cargo_test] +fn index_requires_token() { + // --index will not load registry.token to avoid possibly leaking + // crates.io token to another server. + registry::init(); + let credentials = paths::home().join(".cargo/credentials"); + fs::remove_file(&credentials).unwrap(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + license = "MIT" + description = "foo" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("publish --no-verify --index") + .arg(registry_url().to_string()) + .with_status(101) + .with_stderr( + "\ +[UPDATING] [..] 
+[ERROR] command-line argument --index requires --token to be specified +", + ) + .run(); +} + +#[cargo_test] +fn registry_token_with_source_replacement() { + // publish with source replacement without --token + registry::init(); + + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + license = "MIT" + description = "foo" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("publish --no-verify") + .with_stderr( + "\ +[UPDATING] [..] +[WARNING] using `registry.token` config value with source replacement is deprecated +This may become a hard error in the future[..] +Use the --token command-line flag to remove this warning. +[WARNING] manifest has no documentation, [..] +See [..] +[PACKAGING] foo v0.0.1 ([CWD]) +[UPLOADING] foo v0.0.1 ([CWD]) +", + ) + .run(); +} + +#[cargo_test] +fn publish_with_missing_readme() { + registry::init(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + authors = [] + license = "MIT" + description = "foo" + homepage = "https://example.com/" + readme = "foo.md" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("publish --no-verify --token sekrit") + .with_status(101) + .with_stderr(&format!( + "\ +[UPDATING] [..] +[PACKAGING] foo v0.1.0 [..] +[UPLOADING] foo v0.1.0 [..] +[ERROR] failed to read `readme` file for package `foo v0.1.0 ([ROOT]/foo)` + +Caused by: + failed to read `[ROOT]/foo/foo.md` + +Caused by: + {} +", + no_such_file_err_msg() + )) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/pub_priv.rs cargo-0.47.0/tests/testsuite/pub_priv.rs --- cargo-0.44.1/tests/testsuite/pub_priv.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/pub_priv.rs 2020-07-17 20:39:39.000000000 +0000 @@ -112,7 +112,7 @@ Caused by: the cargo feature `public-dependency` requires a nightly version of Cargo, but this is the `stable` channel -See https://doc.rust-lang.org/book/appendix-07-nightly-rust.html for more information about Rust release channels. + See https://doc.rust-lang.org/book/appendix-07-nightly-rust.html for more information about Rust release channels. 
" ) .run() @@ -150,7 +150,7 @@ Caused by: feature `public-dependency` is required -consider adding `cargo-features = [\"public-dependency\"]` to the manifest + consider adding `cargo-features = [\"public-dependency\"]` to the manifest ", ) .run() diff -Nru cargo-0.44.1/tests/testsuite/read_manifest.rs cargo-0.47.0/tests/testsuite/read_manifest.rs --- cargo-0.44.1/tests/testsuite/read_manifest.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/read_manifest.rs 2020-07-17 20:39:39.000000000 +0000 @@ -2,14 +2,16 @@ use cargo_test_support::{basic_bin_manifest, main_file, project}; -static MANIFEST_OUTPUT: &str = r#" -{ +fn manifest_output(readme_value: &str) -> String { + format!( + r#" +{{ "authors": [ "wycats@example.com" ], "categories": [], "name":"foo", - "readme": null, + "readme": {}, "repository": null, "version":"0.5.0", "id":"foo[..]0.5.0[..](path+file://[..]/foo)", @@ -21,19 +23,44 @@ "edition": "2015", "source":null, "dependencies":[], - "targets":[{ + "targets":[{{ "kind":["bin"], "crate_types":["bin"], "doctest": false, "edition": "2015", "name":"foo", "src_path":"[..]/foo/src/foo.rs" - }], - "features":{}, + }}], + "features":{{}}, "manifest_path":"[..]Cargo.toml", "metadata": null, "publish": null -}"#; +}}"#, + readme_value + ) +} + +fn manifest_output_no_readme() -> String { + manifest_output("null") +} + +pub fn basic_bin_manifest_with_readme(name: &str, readme_filename: &str) -> String { + format!( + r#" + [package] + + name = "{}" + version = "0.5.0" + authors = ["wycats@example.com"] + readme = {} + + [[bin]] + + name = "{}" + "#, + name, readme_filename, name + ) +} #[cargo_test] fn cargo_read_manifest_path_to_cargo_toml_relative() { @@ -44,7 +71,7 @@ p.cargo("read-manifest --manifest-path foo/Cargo.toml") .cwd(p.root().parent().unwrap()) - .with_json(MANIFEST_OUTPUT) + .with_json(&manifest_output_no_readme()) .run(); } @@ -58,7 +85,7 @@ p.cargo("read-manifest --manifest-path") .arg(p.root().join("Cargo.toml")) .cwd(p.root().parent().unwrap()) - .with_json(MANIFEST_OUTPUT) + .with_json(&manifest_output_no_readme()) .run(); } @@ -104,5 +131,70 @@ .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); - p.cargo("read-manifest").with_json(MANIFEST_OUTPUT).run(); + p.cargo("read-manifest") + .with_json(&manifest_output_no_readme()) + .run(); +} + +#[cargo_test] +fn cargo_read_manifest_with_specified_readme() { + let p = project() + .file( + "Cargo.toml", + &basic_bin_manifest_with_readme("foo", r#""SomeReadme.txt""#), + ) + .file("SomeReadme.txt", "Sample Project") + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("read-manifest") + .with_json(&manifest_output(&format!(r#""{}""#, "SomeReadme.txt"))) + .run(); +} + +#[cargo_test] +fn cargo_read_manifest_default_readme() { + let readme_filenames = ["README.md", "README.txt", "README"]; + + for readme in readme_filenames.iter() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file(readme, "Sample project") + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("read-manifest") + .with_json(&manifest_output(&format!(r#""{}""#, readme))) + .run(); + } +} + +#[cargo_test] +fn cargo_read_manifest_suppress_default_readme() { + let p = project() + .file( + "Cargo.toml", + &basic_bin_manifest_with_readme("foo", "false"), + ) + .file("README.txt", "Sample project") + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("read-manifest") + .with_json(&manifest_output_no_readme()) + .run(); +} + +// If a 
file named README.md exists, and `readme = true`, the value `README.md` should be defaulted in. +#[cargo_test] +fn cargo_read_manifest_defaults_readme_if_true() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest_with_readme("foo", "true")) + .file("README.md", "Sample project") + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("read-manifest") + .with_json(&manifest_output(r#""README.md""#)) + .run(); } diff -Nru cargo-0.44.1/tests/testsuite/registry.rs cargo-0.47.0/tests/testsuite/registry.rs --- cargo-0.44.1/tests/testsuite/registry.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/registry.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,15 +1,13 @@ //! Tests for normal registry dependencies. -use std::fs::{self, File}; -use std::io::prelude::*; -use std::path::Path; - use cargo::util::paths::remove_dir_all; use cargo_test_support::cargo_process; use cargo_test_support::git; use cargo_test_support::paths::{self, CargoPathExt}; -use cargo_test_support::registry::{self, registry_path, registry_url, Dependency, Package}; +use cargo_test_support::registry::{self, registry_path, Dependency, Package}; use cargo_test_support::{basic_manifest, project, t}; +use std::fs::{self, File}; +use std::path::Path; #[cargo_test] fn simple() { @@ -225,9 +223,9 @@ .with_status(101) .with_stderr_contains( "\ -error: failed to select a version for the requirement `foo = \">= 1.0.0\"` - candidate versions found which didn't match: 0.0.2, 0.0.1 - location searched: `[..]` index (which is replacing registry `[..]`) +error: failed to select a version for the requirement `foo = \">=1.0.0\"` +candidate versions found which didn't match: 0.0.2, 0.0.1 +location searched: `[..]` index (which is replacing registry `[..]`) required by package `foo v0.0.1 ([..])` ", ) @@ -240,9 +238,9 @@ .with_status(101) .with_stderr_contains( "\ -error: failed to select a version for the requirement `foo = \">= 1.0.0\"` - candidate versions found which didn't match: 0.0.4, 0.0.3, 0.0.2, ... - location searched: `[..]` index (which is replacing registry `[..]`) +error: failed to select a version for the requirement `foo = \">=1.0.0\"` +candidate versions found which didn't match: 0.0.4, 0.0.3, 0.0.2, ... +location searched: `[..]` index (which is replacing registry `[..]`) required by package `foo v0.0.1 ([..])` ", ) @@ -371,8 +369,8 @@ Caused by: no matching package named `notyet` found -location searched: registry `https://github.com/rust-lang/crates.io-index` -required by package `foo v0.0.1 [..]` + location searched: registry `https://github.com/rust-lang/crates.io-index` + required by package `foo v0.0.1 [..]` ", ) .run(); @@ -545,9 +543,9 @@ .with_status(101) .with_stderr_contains( "\ -error: failed to select a version for the requirement `baz = \"= 0.0.2\"` - candidate versions found which didn't match: 0.0.1 - location searched: `[..]` index (which is replacing registry `[..]`) +error: failed to select a version for the requirement `baz = \"=0.0.2\"` +candidate versions found which didn't match: 0.0.1 +location searched: `[..]` index (which is replacing registry `[..]`) required by package `bar v0.0.1` ... 
which is depended on by `foo [..]` ", @@ -676,8 +674,9 @@ Package::new("bar", "0.0.1").yanked(true).publish(); Package::new("baz", "0.0.1").publish(); - t!(t!(File::create(p.root().join("Cargo.toml"))).write_all( - br#" + p.change_file( + "Cargo.toml", + r#" [project] name = "foo" version = "0.0.1" @@ -686,8 +685,8 @@ [dependencies] bar = "*" baz = "*" - "# - )); + "#, + ); p.cargo("build").with_stdout("").run(); } @@ -902,8 +901,7 @@ ) .file("src/main.rs", "fn main() {}") .build(); - p.cargo("publish -v --index") - .arg(registry_url().to_string()) + p.cargo("publish -v --token sekrit") .with_status(101) .with_stderr_contains("[ERROR] the license file `foo` does not exist") .run(); @@ -956,8 +954,9 @@ ) .run(); - t!(t!(File::create(&p.root().join("a/Cargo.toml"))).write_all( - br#" + p.change_file( + "a/Cargo.toml", + r#" [project] name = "a" version = "0.0.1" @@ -965,8 +964,8 @@ [dependencies] bar = "0.1.0" - "# - )); + "#, + ); Package::new("bar", "0.1.0").publish(); println!("second"); @@ -1613,8 +1612,9 @@ p.cargo("build").run(); - t!(t!(File::create(p.root().join("Cargo.toml"))).write_all( - br#" + p.change_file( + "Cargo.toml", + r#" [project] name = "bar" version = "0.5.0" @@ -1623,8 +1623,8 @@ [dependencies] baz = { path = "baz" } remote = "0.3" - "# - )); + "#, + ); p.cargo("build") .with_stderr( @@ -1671,8 +1671,9 @@ p.cargo("build").run(); - t!(t!(File::create(p.root().join("Cargo.toml"))).write_all( - br#" + p.change_file( + "Cargo.toml", + r#" [project] name = "bar" version = "0.6.0" @@ -1680,8 +1681,8 @@ [dependencies] baz = { path = "baz" } - "# - )); + "#, + ); p.cargo("build") .with_stderr( @@ -1938,15 +1939,14 @@ p.cargo("build").run(); remove_dir_all(paths::home().join(".cargo/registry")).unwrap(); - File::create(paths::home().join(".gitconfig")) - .unwrap() - .write_all( - br#" + fs::write( + paths::home().join(".gitconfig"), + r#" [init] templatedir = nowhere "#, - ) - .unwrap(); + ) + .unwrap(); p.cargo("build").run(); p.cargo("build").run(); @@ -2083,3 +2083,53 @@ t!(fs::set_permissions(path, perms)); } } + +#[cargo_test] +fn registry_index_rejected() { + Package::new("dep", "0.1.0").publish(); + + let p = project() + .file( + ".cargo/config", + r#" + [registry] + index = "https://example.com/" + "#, + ) + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + dep = "0.1" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("check") + .with_status(101) + .with_stderr( + "\ +[ERROR] failed to parse manifest at `[..]/foo/Cargo.toml` + +Caused by: + the `registry.index` config value is no longer supported + Use `[source]` replacement to alter the default index for crates.io. +", + ) + .run(); + + p.cargo("login") + .with_status(101) + .with_stderr( + "\ +[ERROR] the `registry.index` config value is no longer supported +Use `[source]` replacement to alter the default index for crates.io. +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/replace.rs cargo-0.47.0/tests/testsuite/replace.rs --- cargo-0.44.1/tests/testsuite/replace.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/replace.rs 2020-07-17 20:39:39.000000000 +0000 @@ -548,8 +548,8 @@ Caused by: no matching package for override `[..]baz:0.1.0` found -location searched: file://[..] -version required: = 0.1.0 + location searched: file://[..] + version required: =0.1.0 ", ) .run(); @@ -682,10 +682,10 @@ Caused by: overlapping replacement specifications found: - * [..] - * [..] + * [..] + * [..] 
-both specifications match: bar v0.1.0 + both specifications match: bar v0.1.0 ", ) .run(); diff -Nru cargo-0.44.1/tests/testsuite/required_features.rs cargo-0.47.0/tests/testsuite/required_features.rs --- cargo-0.44.1/tests/testsuite/required_features.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/required_features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,6 +4,7 @@ assert_has_installed_exe, assert_has_not_installed_exe, cargo_home, }; use cargo_test_support::is_nightly; +use cargo_test_support::paths::CargoPathExt; use cargo_test_support::project; #[cargo_test] @@ -667,7 +668,7 @@ Caused by: target `foo` in package `foo` requires the features: `a` -Consider enabling them by passing, e.g., `--features=\"a\"` + Consider enabling them by passing, e.g., `--features=\"a\"` ", ) .run(); @@ -687,7 +688,7 @@ Caused by: target `foo` in package `foo` requires the features: `a` -Consider enabling them by passing, e.g., `--features=\"a\"` + Consider enabling them by passing, e.g., `--features=\"a\"` ", ) .run(); @@ -910,7 +911,17 @@ ) .file("src/main.rs", "fn main() {}") .file("examples/foo.rs", "fn main() {}") - .file("tests/foo.rs", "#[test]\nfn test() {}") + .file( + "tests/foo.rs", + r#" + #[test] + fn bin_is_built() { + let s = format!("target/debug/foo{}", std::env::consts::EXE_SUFFIX); + let p = std::path::Path::new(&s); + assert!(p.exists(), "foo does not exist"); + } + "#, + ) .file( "benches/foo.rs", r#" @@ -936,7 +947,9 @@ .file("bar/src/lib.rs", "") .build(); - p.cargo("build").run(); + // This is a no-op + p.cargo("build").with_stderr("[FINISHED] dev [..]").run(); + assert!(!p.bin("foo").is_file()); // bin p.cargo("build --bin=foo") @@ -967,19 +980,23 @@ assert!(p.bin("examples/foo").is_file()); // test + // This is a no-op, since no tests are enabled p.cargo("test") .with_stderr("[FINISHED] test [unoptimized + debuginfo] target(s) in [..]") .with_stdout("") .run(); + // Delete the target directory so this can check if the main.rs gets built. + p.build_dir().rm_rf(); p.cargo("test --test=foo --features bar/a") .with_stderr( "\ +[COMPILING] bar v0.0.1 ([CWD]/bar) [COMPILING] foo v0.0.1 ([CWD]) [FINISHED] test [unoptimized + debuginfo] target(s) in [..] [RUNNING] target/debug/deps/foo-[..][EXE]", ) - .with_stdout_contains("test test ... ok") + .with_stdout_contains("test bin_is_built ... ok") .run(); // bench diff -Nru cargo-0.44.1/tests/testsuite/run.rs cargo-0.47.0/tests/testsuite/run.rs --- cargo-0.44.1/tests/testsuite/run.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/run.rs 2020-07-17 20:39:39.000000000 +0000 @@ -275,7 +275,7 @@ Caused by: default-run target `b` not found -Did you mean `a`? + Did you mean `a`? ", ) .run(); @@ -632,14 +632,14 @@ [COMPILING] bar v0.5.0 ([CWD]/bar) [RUNNING] `rustc --crate-name bar bar/src/bar.rs [..]--crate-type lib \ --emit=[..]link \ - -C opt-level=3 \ + -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [CWD]/target/release/deps \ -L dependency=[CWD]/target/release/deps` [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name a examples/a.rs [..]--crate-type bin \ --emit=[..]link \ - -C opt-level=3 \ + -C opt-level=3[..]\ -C metadata=[..] \ --out-dir [CWD]/target/release/examples \ -L dependency=[CWD]/target/release/deps \ @@ -660,14 +660,14 @@ "\ [COMPILING] bar v0.5.0 ([CWD]/bar) [RUNNING] `rustc --crate-name bar bar/src/bar.rs [..]--crate-type lib \ - --emit=[..]link \ + --emit=[..]link[..]\ -C debuginfo=2 \ -C metadata=[..] 
\ --out-dir [CWD]/target/debug/deps \ -L dependency=[CWD]/target/debug/deps` [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name a examples/a.rs [..]--crate-type bin \ - --emit=[..]link \ + --emit=[..]link[..]\ -C debuginfo=2 \ -C metadata=[..] \ --out-dir [CWD]/target/debug/examples \ @@ -849,6 +849,7 @@ fn main() {{ let search_path = std::env::var_os("{}").unwrap(); let paths = std::env::split_paths(&search_path).collect::<Vec<_>>(); + println!("{{:#?}}", paths); assert!(paths.contains(&r#"{}"#.into())); assert!(paths.contains(&r#"{}"#.into())); }} diff -Nru cargo-0.44.1/tests/testsuite/rustc_info_cache.rs cargo-0.47.0/tests/testsuite/rustc_info_cache.rs --- cargo-0.44.1/tests/testsuite/rustc_info_cache.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/rustc_info_cache.rs 2020-07-17 20:39:39.000000000 +0000 @@ -6,6 +6,12 @@ #[cargo_test] fn rustc_info_cache() { + // Needs `-Cbitcode-in-rlib` to ride to stable before this can be enabled + // everywhere. + if !cargo_test_support::is_nightly() { + return; + } + let p = project() .file("src/main.rs", r#"fn main() { println!("hello"); }"#) .build(); diff -Nru cargo-0.44.1/tests/testsuite/rustc.rs cargo-0.47.0/tests/testsuite/rustc.rs --- cargo-0.44.1/tests/testsuite/rustc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/rustc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -18,7 +18,7 @@ "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` @@ -40,7 +40,7 @@ "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C debug-assertions=off \ -C metadata=[..] \ --out-dir [..] \ @@ -63,12 +63,12 @@ "\ [COMPILING] {name} v{version} ([CWD]) [RUNNING] `rustc --crate-name {name} src/lib.rs [..]--crate-type lib \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C metadata=[..] \ --out-dir [..] \ -L dependency=[CWD]/target/debug/deps` [RUNNING] `rustc --crate-name {name} src/main.rs [..]--crate-type bin \ - --emit=[..]link -C debuginfo=2 \ + --emit=[..]link[..]-C debuginfo=2 \ -C debug-assertions \ -C metadata=[..] \ --out-dir [..] \ @@ -108,10 +108,10 @@ .with_stderr( "\ [COMPILING] foo v0.0.1 ([CWD]) -[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link \ +[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link[..]\ -C debuginfo=2 -C metadata=[..] \ --out-dir [..]` -[RUNNING] `rustc --crate-name bar src/bin/bar.rs [..]--crate-type bin --emit=[..]link \ +[RUNNING] `rustc --crate-name bar src/bin/bar.rs [..]--crate-type bin --emit=[..]link[..]\ -C debuginfo=2 -C debug-assertions [..]` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", @@ -147,10 +147,10 @@ .with_stderr( "\ [COMPILING] foo v0.0.1 ([CWD]) -[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link \ +[RUNNING] `rustc --crate-name foo src/lib.rs [..]--crate-type lib --emit=[..]link[..]\ -C debuginfo=2 -C metadata=[..] \ --out-dir [..]` -[RUNNING] `rustc --crate-name bar tests/bar.rs [..]--emit=[..]link -C debuginfo=2 \ +[RUNNING] `rustc --crate-name bar tests/bar.rs [..]--emit=[..]link[..]-C debuginfo=2 \ -C debug-assertions [..]--test[..]` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", @@ -261,7 +261,7 @@ ) // unit test .with_stderr_contains( - "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link \ + "[RUNNING] `rustc --crate-name foo src/main.rs [..]--emit=[..]link[..]\ -C debuginfo=2 --test [..]", ) .run(); diff -Nru cargo-0.44.1/tests/testsuite/rustdoc_extern_html.rs cargo-0.47.0/tests/testsuite/rustdoc_extern_html.rs --- cargo-0.44.1/tests/testsuite/rustdoc_extern_html.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/rustdoc_extern_html.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,375 @@ +//! Tests for the -Zrustdoc-map feature. + +use cargo_test_support::registry::Package; +use cargo_test_support::{is_nightly, paths, project, Project}; + +fn basic_project() -> Project { + Package::new("bar", "1.0.0") + .file("src/lib.rs", "pub struct Straw;") + .publish(); + + project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [dependencies] + bar = "1.0" + "#, + ) + .file( + "src/lib.rs", + r#" + pub fn myfun() -> Option { + None + } + "#, + ) + .build() +} + +fn docs_rs(p: &Project) { + p.change_file( + ".cargo/config", + r#" + [doc.extern-map.registries] + crates-io = "https://docs.rs/" + "#, + ); +} + +#[cargo_test] +fn ignores_on_stable() { + // Requires -Zrustdoc-map to use. + let p = basic_project(); + docs_rs(&p); + p.cargo("doc -v --no-deps") + .with_stderr_does_not_contain("[..]--extern-html-root-url[..]") + .run(); +} + +#[cargo_test] +fn simple() { + // Basic test that it works with crates.io. + if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + let p = basic_project(); + docs_rs(&p); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains( + "[RUNNING] `rustdoc [..]--crate-name foo [..]bar=https://docs.rs/bar/1.0.0/[..]", + ) + .run(); + let myfun = p.read_file("target/doc/foo/fn.myfun.html"); + assert!(myfun.contains(r#"href="https://docs.rs/bar/1.0.0/bar/struct.Straw.html""#)); +} + +#[cargo_test] +fn std_docs() { + // Mapping std docs somewhere else. + if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + // For local developers, skip this test if docs aren't installed. 
+ let docs = std::path::Path::new(&paths::sysroot()).join("share/doc/rust/html"); + if !docs.exists() { + if cargo::util::is_ci() { + panic!("std docs are not installed, check that the rust-docs component is installed"); + } else { + eprintln!( + "documentation not found at {}, \ + skipping test (run `rustup component add rust-docs` to install", + docs.display() + ); + return; + } + } + let p = basic_project(); + p.change_file( + ".cargo/config", + r#" + [doc.extern-map] + std = "local" + "#, + ); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains("[RUNNING] `rustdoc [..]--crate-name foo [..]std=file://[..]") + .run(); + let myfun = p.read_file("target/doc/foo/fn.myfun.html"); + assert!(myfun.contains(r#"share/doc/rust/html/core/option/enum.Option.html""#)); + + p.change_file( + ".cargo/config", + r#" + [doc.extern-map] + std = "https://example.com/rust/" + "#, + ); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains( + "[RUNNING] `rustdoc [..]--crate-name foo [..]std=https://example.com/rust/[..]", + ) + .run(); + let myfun = p.read_file("target/doc/foo/fn.myfun.html"); + assert!(myfun.contains(r#"href="https://example.com/rust/core/option/enum.Option.html""#)); +} + +#[cargo_test] +fn renamed_dep() { + // Handles renamed dependencies. + if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + Package::new("bar", "1.0.0") + .file("src/lib.rs", "pub struct Straw;") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [dependencies] + groovy = { version = "1.0", package = "bar" } + "#, + ) + .file( + "src/lib.rs", + r#" + pub fn myfun() -> Option<groovy::Straw> { + None + } + "#, + ) + .build(); + docs_rs(&p); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains( + "[RUNNING] `rustdoc [..]--crate-name foo [..]bar=https://docs.rs/bar/1.0.0/[..]", + ) + .run(); + let myfun = p.read_file("target/doc/foo/fn.myfun.html"); + assert!(myfun.contains(r#"href="https://docs.rs/bar/1.0.0/bar/struct.Straw.html""#)); +} + +#[cargo_test] +fn lib_name() { + // Handles lib name != package name. + if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + Package::new("bar", "1.0.0") + .file( + "Cargo.toml", + r#" + [package] + name = "bar" + version = "1.0.0" + + [lib] + name = "rumpelstiltskin" + "#, + ) + .file("src/lib.rs", "pub struct Straw;") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + bar = "1.0" + "#, + ) + .file( + "src/lib.rs", + r#" + pub fn myfun() -> Option<rumpelstiltskin::Straw> { + None + } + "#, + ) + .build(); + docs_rs(&p); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains( + "[RUNNING] `rustdoc [..]--crate-name foo [..]rumpelstiltskin=https://docs.rs/bar/1.0.0/[..]", + ) + .run(); + let myfun = p.read_file("target/doc/foo/fn.myfun.html"); + assert!(myfun.contains(r#"href="https://docs.rs/bar/1.0.0/rumpelstiltskin/struct.Straw.html""#)); +} + +#[cargo_test] +fn alt_registry() { + // Supports other registry names. 
+ if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + Package::new("bar", "1.0.0") + .alternative(true) + .file( + "src/lib.rs", + r#" + extern crate baz; + pub struct Queen; + pub use baz::King; + "#, + ) + .registry_dep("baz", "1.0") + .publish(); + Package::new("baz", "1.0.0") + .alternative(true) + .file("src/lib.rs", "pub struct King;") + .publish(); + Package::new("grimm", "1.0.0") + .file("src/lib.rs", "pub struct Gold;") + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [dependencies] + bar = { version = "1.0", registry="alternative" } + grimm = "1.0" + "#, + ) + .file( + "src/lib.rs", + r#" + pub fn queen() -> bar::Queen { bar::Queen } + pub fn king() -> bar::King { bar::King } + pub fn gold() -> grimm::Gold { grimm::Gold } + "#, + ) + .file( + ".cargo/config", + r#" + [doc.extern-map.registries] + alternative = "https://example.com/{pkg_name}/{version}/" + crates-io = "https://docs.rs/" + "#, + ) + .build(); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains( + "[RUNNING] `rustdoc [..]--crate-name foo \ + [..]bar=https://example.com/bar/1.0.0/[..]grimm=https://docs.rs/grimm/1.0.0/[..]", + ) + .run(); + let queen = p.read_file("target/doc/foo/fn.queen.html"); + assert!(queen.contains(r#"href="https://example.com/bar/1.0.0/bar/struct.Queen.html""#)); + // The king example fails to link. Rustdoc seems to want the origin crate + // name (baz) for re-exports. There are many issues in the issue tracker + // for rustdoc re-exports, so I'm not sure, but I think this is maybe a + // rustdoc issue. Alternatively, Cargo could provide mappings for all + // transitive dependencies to fix this. + let king = p.read_file("target/doc/foo/fn.king.html"); + assert!(king.contains(r#"-> King"#)); + + let gold = p.read_file("target/doc/foo/fn.gold.html"); + assert!(gold.contains(r#"href="https://docs.rs/grimm/1.0.0/grimm/struct.Gold.html""#)); +} + +#[cargo_test] +fn multiple_versions() { + // What happens when there are multiple versions. + // NOTE: This is currently broken behavior. Rustdoc does not provide a way + // to match renamed dependencies. + if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + Package::new("bar", "1.0.0") + .file("src/lib.rs", "pub struct Spin;") + .publish(); + Package::new("bar", "2.0.0") + .file("src/lib.rs", "pub struct Straw;") + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + edition = "2018" + + [dependencies] + bar = "1.0" + bar2 = {version="2.0", package="bar"} + "#, + ) + .file( + "src/lib.rs", + " + pub fn fn1() -> bar::Spin {bar::Spin} + pub fn fn2() -> bar2::Straw {bar2::Straw} + ", + ) + .build(); + docs_rs(&p); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains( + "[RUNNING] `rustdoc [..]--crate-name foo \ + [..]bar=https://docs.rs/bar/1.0.0/[..]bar=https://docs.rs/bar/2.0.0/[..]", + ) + .run(); + let fn1 = p.read_file("target/doc/foo/fn.fn1.html"); + // This should be 1.0.0, rustdoc seems to use the last entry when there + // are duplicates. + assert!(fn1.contains(r#"href="https://docs.rs/bar/2.0.0/bar/struct.Spin.html""#)); + let fn2 = p.read_file("target/doc/foo/fn.fn2.html"); + assert!(fn2.contains(r#"href="https://docs.rs/bar/2.0.0/bar/struct.Straw.html""#)); +} + +#[cargo_test] +fn rebuilds_when_changing() { + // Make sure it rebuilds if the map changes. 
+ if !is_nightly() { + // --extern-html-root-url is unstable + return; + } + let p = basic_project(); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_does_not_contain("[..]--extern-html-root-url[..]") + .run(); + + docs_rs(&p); + p.cargo("doc -v --no-deps -Zrustdoc-map") + .masquerade_as_nightly_cargo() + .with_stderr_contains("[..]--extern-html-root-url[..]") + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/rustdoc.rs cargo-0.47.0/tests/testsuite/rustdoc.rs --- cargo-0.44.1/tests/testsuite/rustdoc.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/rustdoc.rs 2020-07-17 20:39:39.000000000 +0000 @@ -40,6 +40,17 @@ } #[cargo_test] +fn rustdoc_binary_args_passed() { + let p = project().file("src/main.rs", "").build(); + + p.cargo("rustdoc -v") + .arg("--") + .arg("--markdown-no-toc") + .with_stderr_contains("[RUNNING] `rustdoc [..] --markdown-no-toc[..]`") + .run(); +} + +#[cargo_test] fn rustdoc_foo_with_bar_dependency() { let foo = project() .file( diff -Nru cargo-0.44.1/tests/testsuite/rustflags.rs cargo-0.47.0/tests/testsuite/rustflags.rs --- cargo-0.44.1/tests/testsuite/rustflags.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/rustflags.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,12 +1,10 @@ //! Tests for setting custom rustc flags. -use std::fs::{self, File}; -use std::io::Write; - use cargo_test_support::registry::Package; use cargo_test_support::{ basic_lib_manifest, basic_manifest, paths, project, project_in_home, rustc_host, }; +use std::fs; #[cargo_test] fn env_rustflags_normal_source() { @@ -871,8 +869,7 @@ "#; let config_file = paths::root().join("foo/.cargo/config"); fs::create_dir_all(config_file.parent().unwrap()).unwrap(); - let mut config_file = File::create(config_file).unwrap(); - config_file.write_all(config.as_bytes()).unwrap(); + fs::write(config_file, config).unwrap(); p.cargo("build") .with_status(101) @@ -893,8 +890,7 @@ "#; let config_file = paths::root().join("foo/.cargo/config"); fs::create_dir_all(config_file.parent().unwrap()).unwrap(); - let mut config_file = File::create(config_file).unwrap(); - config_file.write_all(config.as_bytes()).unwrap(); + fs::write(config_file, config).unwrap(); p.cargo("build") .with_status(101) @@ -928,15 +924,14 @@ let home = paths::home(); let home_config = home.join(".cargo"); fs::create_dir(&home_config).unwrap(); - File::create(&home_config.join("config")) - .unwrap() - .write_all( - br#" - [build] - rustflags = ["-Cllvm-args=-x86-asm-syntax=intel"] - "#, - ) - .unwrap(); + fs::write( + &home_config.join("config"), + r#" + [build] + rustflags = ["-Cllvm-args=-x86-asm-syntax=intel"] + "#, + ) + .unwrap(); // And we need the project to be inside the home directory // so the walking process finds the home project twice. diff -Nru cargo-0.44.1/tests/testsuite/search.rs cargo-0.47.0/tests/testsuite/search.rs --- cargo-0.44.1/tests/testsuite/search.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/search.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,14 +1,12 @@ //! Tests for the `cargo search` command. 
-use std::collections::HashSet; -use std::fs::{self, File}; -use std::io::prelude::*; -use std::path::Path; - use cargo_test_support::cargo_process; use cargo_test_support::git::repo; use cargo_test_support::paths; use cargo_test_support::registry::{api_path, registry_path, registry_url}; +use std::collections::HashSet; +use std::fs; +use std::path::Path; use url::Url; fn api() -> Url { @@ -37,9 +35,45 @@ "repository": "https://github.com/nick29581/libhoare", "updated_at": "2014-11-20T21:49:21Z", "versions": null - }], + }, + { + "id": "postgres", + "name": "postgres", + "updated_at": "2020-05-01T23:17:54.335921+00:00", + "versions": null, + "keywords": null, + "categories": null, + "badges": [ + { + "badge_type": "circle-ci", + "attributes": { + "repository": "sfackler/rust-postgres", + "branch": null + } + } + ], + "created_at": "2014-11-24T02:34:44.756689+00:00", + "downloads": 535491, + "recent_downloads": 88321, + "max_version": "0.17.3", + "newest_version": "0.17.3", + "description": "A native, synchronous PostgreSQL client", + "homepage": null, + "documentation": null, + "repository": "https://github.com/sfackler/rust-postgres", + "links": { + "version_downloads": "/api/v1/crates/postgres/downloads", + "versions": "/api/v1/crates/postgres/versions", + "owners": "/api/v1/crates/postgres/owners", + "owner_team": "/api/v1/crates/postgres/owner_team", + "owner_user": "/api/v1/crates/postgres/owner_user", + "reverse_dependencies": "/api/v1/crates/postgres/reverse_dependencies" + }, + "exact_match": true + } + ], "meta": { - "total": 1 + "total": 2 } }"#; @@ -48,18 +82,21 @@ // // On windows, though, `?` is an invalid character, but we always build curl // from source there anyway! - File::create(&dest) - .unwrap() - .write_all(content.as_bytes()) - .unwrap(); + fs::write(&dest, content).unwrap(); if !cfg!(windows) { - File::create(&dest.with_file_name("crates?q=postgres&per_page=10")) - .unwrap() - .write_all(content.as_bytes()) - .unwrap(); + fs::write( + &dest.with_file_name("crates?q=postgres&per_page=10"), + content, + ) + .unwrap(); } } +const SEARCH_RESULTS: &str = "\ +hoare = \"0.1.1\" # Design by contract style assertions for Rust +postgres = \"0.17.3\" # A native, synchronous PostgreSQL client +"; + fn setup() { let cargo_home = paths::root().join(".cargo"); fs::create_dir_all(cargo_home).unwrap(); @@ -80,11 +117,10 @@ fn set_cargo_config() { let config = paths::root().join(".cargo/config"); - File::create(&config) - .unwrap() - .write_all( - format!( - r#" + fs::write( + &config, + format!( + r#" [source.crates-io] registry = 'https://wut' replace-with = 'dummy-registry' @@ -92,11 +128,10 @@ [source.dummy-registry] registry = '{reg}' "#, - reg = registry_url(), - ) - .as_bytes(), - ) - .unwrap(); + reg = registry_url(), + ), + ) + .unwrap(); } #[cargo_test] @@ -120,7 +155,7 @@ drop(lock); cargo_process("search postgres") - .with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust") + .with_stdout_contains(SEARCH_RESULTS) .with_stderr("") // without "Updating ... index" .run(); } @@ -131,7 +166,7 @@ set_cargo_config(); cargo_process("search postgres") - .with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust") + .with_stdout_contains(SEARCH_RESULTS) .with_stderr_contains("[..]Updating [..] 
index") .run(); } @@ -142,7 +177,7 @@ cargo_process("search postgres --index") .arg(registry_url().to_string()) - .with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust") + .with_stdout_contains(SEARCH_RESULTS) .run(); } @@ -168,7 +203,7 @@ [UPDATING] `[CWD]/registry` index ", ) - .with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust") + .with_stdout_contains(SEARCH_RESULTS) .run(); } @@ -196,7 +231,7 @@ [UPDATING] `[CWD]/registry` index ", ) - .with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust") + .with_stdout_contains(SEARCH_RESULTS) .run(); } @@ -206,7 +241,7 @@ cargo_process("search postgres sql --index") .arg(registry_url().to_string()) - .with_stdout_contains("hoare = \"0.1.1\" # Design by contract style assertions for Rust") + .with_stdout_contains(SEARCH_RESULTS) .run(); } diff -Nru cargo-0.44.1/tests/testsuite/standard_lib.rs cargo-0.47.0/tests/testsuite/standard_lib.rs --- cargo-0.44.1/tests/testsuite/standard_lib.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/standard_lib.rs 2020-07-17 20:39:39.000000000 +0000 @@ -21,6 +21,12 @@ return None; } + if cfg!(all(target_os = "windows", target_env = "gnu")) { + // FIXME: contains object files that we don't handle yet: + // https://github.com/rust-lang/wg-cargo-std-aware/issues/46 + return None; + } + // Our mock sysroot requires a few packages from crates.io, so make sure // they're "published" to crates.io. Also edit their code a bit to make sure // that they have access to our custom crates with custom apis. @@ -297,7 +303,7 @@ r#" #![no_std] pub fn foo() { - assert_eq!(core::u8::MIN, 0); + assert_eq!(u8::MIN, 0); } "#, ) @@ -512,7 +518,7 @@ ) .build(); - p.cargo("test --doc -v") + p.cargo("test --doc -v -Zdoctest-xcompile") .build_std(&setup) .with_stdout_contains("test src/lib.rs - f [..] ... ok") .target_host() @@ -570,3 +576,31 @@ p.cargo("build -v").build_std(&setup).target_host().run(); } + +#[cargo_test] +fn ignores_incremental() { + // Incremental is not really needed for std, make sure it is disabled. + // Incremental also tends to have bugs that affect std libraries more than + // any other crate. 
+ let setup = match setup() { + Some(s) => s, + None => return, + }; + let p = project().file("src/lib.rs", "").build(); + p.cargo("build") + .env("CARGO_INCREMENTAL", "1") + .build_std(&setup) + .target_host() + .run(); + let incremental: Vec<_> = p + .glob(format!("target/{}/debug/incremental/*", rustc_host())) + .map(|e| e.unwrap()) + .collect(); + assert_eq!(incremental.len(), 1); + assert!(incremental[0] + .file_name() + .unwrap() + .to_str() + .unwrap() + .starts_with("foo-")); +} diff -Nru cargo-0.44.1/tests/testsuite/test.rs cargo-0.47.0/tests/testsuite/test.rs --- cargo-0.44.1/tests/testsuite/test.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/test.rs 2020-07-17 20:39:39.000000000 +0000 @@ -129,7 +129,7 @@ use std::panic; pub fn main() { let r = panic::catch_unwind(|| { - [1, i32::max_value()].iter().sum::<i32>(); + [1, i32::MAX].iter().sum::<i32>(); }); assert!(r.is_err()); }"#, @@ -3360,9 +3360,11 @@ "name":"foo", "src_path":"[..]lib.rs" }, - "filenames":["[..]/foo-[..]"], + "filenames":"{...}", "fresh": false } + + {"reason": "build-finished", "success": true} "#, ) .run(); @@ -3379,7 +3381,7 @@ .with_json( r#" { - "executable": "[..]/foo/target/debug/foo-[..][EXE]", + "executable": "[..]/foo/target/debug/deps/foo-[..][EXE]", "features": [], "filenames": "{...}", "fresh": false, @@ -3395,6 +3397,8 @@ "src_path": "[..]/foo/src/lib.rs" } } + + {"reason": "build-finished", "success": true} "#, ) .run(); @@ -3413,7 +3417,7 @@ .with_json( r#" { - "executable": "[..]/foo/target/debug/integration_test-[..][EXE]", + "executable": "[..]/foo/target/debug/deps/integration_test-[..][EXE]", "features": [], "filenames": "{...}", "fresh": false, @@ -3429,6 +3433,8 @@ "src_path": "[..]/foo/tests/integration_test.rs" } } + + {"reason": "build-finished", "success": true} "#, ) .run(); } diff -Nru cargo-0.44.1/tests/testsuite/tree_graph_features.rs cargo-0.47.0/tests/testsuite/tree_graph_features.rs --- cargo-0.44.1/tests/testsuite/tree_graph_features.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/tree_graph_features.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,361 @@ +//! Tests for the `cargo tree` command with -e features option. + +use cargo_test_support::project; +use cargo_test_support::registry::{Dependency, Package}; + +#[cargo_test] +fn dep_feature_various() { + // Checks different ways of setting features via dependencies. 
+ Package::new("optdep", "1.0.0") + .feature("default", &["cat"]) + .feature("cat", &[]) + .publish(); + Package::new("defaultdep", "1.0.0") + .feature("default", &["f1"]) + .feature("f1", &["optdep"]) + .add_dep(Dependency::new("optdep", "1.0").optional(true)) + .publish(); + Package::new("nodefaultdep", "1.0.0") + .feature("default", &["f1"]) + .feature("f1", &[]) + .publish(); + Package::new("nameddep", "1.0.0") + .add_dep(Dependency::new("serde", "1.0").optional(true)) + .feature("default", &["serde-stuff"]) + .feature("serde-stuff", &["serde/derive"]) + .feature("vehicle", &["car"]) + .feature("car", &[]) + .publish(); + Package::new("serde_derive", "1.0.0").publish(); + Package::new("serde", "1.0.0") + .feature("derive", &["serde_derive"]) + .add_dep(Dependency::new("serde_derive", "1.0").optional(true)) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + defaultdep = "1.0" + nodefaultdep = {version="1.0", default-features = false} + nameddep = {version="1.0", features = ["vehicle", "serde"]} + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree -e features") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── nodefaultdep v1.0.0 +├── defaultdep feature \"default\" +│ ├── defaultdep v1.0.0 +│ │ └── optdep feature \"default\" +│ │ ├── optdep v1.0.0 +│ │ └── optdep feature \"cat\" +│ │ └── optdep v1.0.0 +│ └── defaultdep feature \"f1\" +│ ├── defaultdep v1.0.0 (*) +│ └── defaultdep feature \"optdep\" +│ └── defaultdep v1.0.0 (*) +├── nameddep feature \"default\" +│ ├── nameddep v1.0.0 +│ │ └── serde feature \"default\" +│ │ └── serde v1.0.0 +│ │ └── serde_derive feature \"default\" +│ │ └── serde_derive v1.0.0 +│ └── nameddep feature \"serde-stuff\" +│ ├── nameddep v1.0.0 (*) +│ ├── nameddep feature \"serde\" +│ │ └── nameddep v1.0.0 (*) +│ └── serde feature \"derive\" +│ ├── serde v1.0.0 (*) +│ └── serde feature \"serde_derive\" +│ └── serde v1.0.0 (*) +├── nameddep feature \"serde\" (*) +└── nameddep feature \"vehicle\" + ├── nameddep v1.0.0 (*) + └── nameddep feature \"car\" + └── nameddep v1.0.0 (*) +", + ) + .run(); +} + +#[cargo_test] +fn graph_features_ws_interdependent() { + // A workspace with interdependent crates. 
+ let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [dependencies] + b = {path="../b", features=["feat2"]} + + [features] + default = ["a1"] + a1 = [] + a2 = [] + "#, + ) + .file("a/src/lib.rs", "") + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [features] + default = ["feat1"] + feat1 = [] + feat2 = [] + "#, + ) + .file("b/src/lib.rs", "") + .build(); + + p.cargo("tree -e features") + .with_stdout( + "\ +a v0.1.0 ([..]/foo/a) +├── b feature \"default\" (command-line) +│ ├── b v0.1.0 ([..]/foo/b) +│ └── b feature \"feat1\" +│ └── b v0.1.0 ([..]/foo/b) +└── b feature \"feat2\" + └── b v0.1.0 ([..]/foo/b) + +b v0.1.0 ([..]/foo/b) +", + ) + .run(); + + p.cargo("tree -e features -i a -i b") + .with_stdout( + "\ +a v0.1.0 ([..]/foo/a) +├── a feature \"a1\" +│ └── a feature \"default\" (command-line) +└── a feature \"default\" (command-line) + +b v0.1.0 ([..]/foo/b) +├── b feature \"default\" (command-line) +│ └── a v0.1.0 ([..]/foo/a) (*) +├── b feature \"feat1\" +│ └── b feature \"default\" (command-line) (*) +└── b feature \"feat2\" + └── a v0.1.0 ([..]/foo/a) (*) +", + ) + .run(); +} + +#[cargo_test] +fn slash_feature_name() { + // dep_name/feat_name syntax + Package::new("opt", "1.0.0").feature("feat1", &[]).publish(); + Package::new("notopt", "1.0.0") + .feature("cat", &[]) + .feature("animal", &["cat"]) + .publish(); + Package::new("opt2", "1.0.0").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + opt = {version = "1.0", optional=true} + opt2 = {version = "1.0", optional=true} + notopt = "1.0" + + [features] + f1 = ["opt/feat1", "notopt/animal"] + f2 = ["f1"] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree -e features --features f1") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── notopt feature \"default\" +│ └── notopt v1.0.0 +└── opt feature \"default\" + └── opt v1.0.0 +", + ) + .run(); + + p.cargo("tree -e features --features f1 -i foo") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── foo feature \"default\" (command-line) +├── foo feature \"f1\" (command-line) +└── foo feature \"opt\" + └── foo feature \"f1\" (command-line) +", + ) + .run(); + + p.cargo("tree -e features --features f1 -i notopt") + .with_stdout( + "\ +notopt v1.0.0 +├── notopt feature \"animal\" +│ └── foo feature \"f1\" (command-line) +├── notopt feature \"cat\" +│ └── notopt feature \"animal\" (*) +└── notopt feature \"default\" + └── foo v0.1.0 ([..]/foo) + ├── foo feature \"default\" (command-line) + ├── foo feature \"f1\" (command-line) + └── foo feature \"opt\" + └── foo feature \"f1\" (command-line) +", + ) + .run(); + + p.cargo("tree -e features --features notopt/animal -i notopt") + .with_stdout( + "\ +notopt v1.0.0 +├── notopt feature \"animal\" (command-line) +├── notopt feature \"cat\" +│ └── notopt feature \"animal\" (command-line) +└── notopt feature \"default\" + └── foo v0.1.0 ([..]/foo) + └── foo feature \"default\" (command-line) +", + ) + .run(); + + p.cargo("tree -e features --all-features") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── notopt feature \"default\" +│ └── notopt v1.0.0 +├── opt feature \"default\" +│ └── opt v1.0.0 +└── opt2 feature \"default\" + └── opt2 v1.0.0 +", + ) + .run(); + + p.cargo("tree -e features --all-features -i opt2") + .with_stdout( + "\ +opt2 v1.0.0 +└── opt2 feature \"default\" + └── foo v0.1.0 
([..]/foo) + ├── foo feature \"f1\" (command-line) + │ └── foo feature \"f2\" (command-line) + ├── foo feature \"f2\" (command-line) + ├── foo feature \"opt\" (command-line) + │ └── foo feature \"f1\" (command-line) (*) + └── foo feature \"opt2\" (command-line) +", + ) + .run(); +} + +#[cargo_test] +fn features_enables_inactive_target() { + // Features that enable things on targets that are not enabled. + Package::new("optdep", "1.0.0") + .feature("feat1", &[]) + .publish(); + Package::new("dep1", "1.0.0") + .feature("somefeat", &[]) + .publish(); + Package::new("dep2", "1.0.0") + .add_dep( + Dependency::new("optdep", "1.0.0") + .optional(true) + .target("cfg(whatever)"), + ) + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [target.'cfg(whatever)'.dependencies] + optdep = {version="1.0", optional=true} + dep1 = "1.0" + + [dependencies] + dep2 = "1.0" + + [features] + f1 = ["optdep"] + f2 = ["optdep/feat1"] + f3 = ["dep1/somefeat"] + f4 = ["dep2/optdep"] + "#, + ) + .file("src/lib.rs", "") + .build(); + p.cargo("tree -e features") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── dep2 feature \"default\" + └── dep2 v1.0.0 +", + ) + .run(); + p.cargo("tree -e features --all-features") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── dep2 feature \"default\" + └── dep2 v1.0.0 +", + ) + .run(); + p.cargo("tree -e features --all-features --target=all") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── dep1 feature \"default\" +│ └── dep1 v1.0.0 +├── dep2 feature \"default\" +│ └── dep2 v1.0.0 +│ └── optdep feature \"default\" +│ └── optdep v1.0.0 +└── optdep feature \"default\" (*) +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/tree.rs cargo-0.47.0/tests/testsuite/tree.rs --- cargo-0.44.1/tests/testsuite/tree.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/tree.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,1438 @@ +//! Tests for the `cargo tree` command. + +use cargo_test_support::cross_compile::alternate; +use cargo_test_support::registry::{Dependency, Package}; +use cargo_test_support::{basic_manifest, git, project, rustc_host, Project}; + +fn make_simple_proj() -> Project { + Package::new("c", "1.0.0").publish(); + Package::new("b", "1.0.0").dep("c", "1.0").publish(); + Package::new("a", "1.0.0").dep("b", "1.0").publish(); + Package::new("bdep", "1.0.0").dep("b", "1.0").publish(); + Package::new("devdep", "1.0.0").dep("b", "1.0.0").publish(); + + project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + a = "1.0" + c = "1.0" + + [build-dependencies] + bdep = "1.0" + + [dev-dependencies] + devdep = "1.0" + "#, + ) + .file("src/lib.rs", "") + .file("build.rs", "fn main() {}") + .build() +} + +#[cargo_test] +fn simple() { + // A simple test with a few different dependencies. + let p = make_simple_proj(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── a v1.0.0 +│ └── b v1.0.0 +│ └── c v1.0.0 +└── c v1.0.0 +[build-dependencies] +└── bdep v1.0.0 + └── b v1.0.0 (*) +[dev-dependencies] +└── devdep v1.0.0 + └── b v1.0.0 (*) +", + ) + .run(); + + p.cargo("tree -p bdep") + .with_stdout( + "\ +bdep v1.0.0 +└── b v1.0.0 + └── c v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn virtual_workspace() { + // Multiple packages in a virtual workspace. 
+ Package::new("somedep", "1.0.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b", "c"] + "#, + ) + .file("a/Cargo.toml", &basic_manifest("a", "1.0.0")) + .file("a/src/lib.rs", "") + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [dependencies] + c = { path = "../c" } + somedep = "1.0" + "#, + ) + .file("b/src/lib.rs", "") + .file("c/Cargo.toml", &basic_manifest("c", "1.0.0")) + .file("c/src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +a v1.0.0 ([..]/foo/a) + +b v0.1.0 ([..]/foo/b) +├── c v1.0.0 ([..]/foo/c) +└── somedep v1.0.0 + +c v1.0.0 ([..]/foo/c) +", + ) + .run(); + + p.cargo("tree -p a").with_stdout("a v1.0.0 [..]").run(); + + p.cargo("tree") + .cwd("b") + .with_stdout( + "\ +b v0.1.0 ([..]/foo/b) +├── c v1.0.0 ([..]/foo/c) +└── somedep v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn dedupe_edges() { + // Works around https://github.com/rust-lang/cargo/issues/7985 + Package::new("bitflags", "1.0.0").publish(); + Package::new("manyfeat", "1.0.0") + .feature("f1", &[]) + .feature("f2", &[]) + .feature("f3", &[]) + .dep("bitflags", "1.0") + .publish(); + Package::new("a", "1.0.0") + .feature_dep("manyfeat", "1.0", &["f1"]) + .publish(); + Package::new("b", "1.0.0") + .feature_dep("manyfeat", "1.0", &["f2"]) + .publish(); + Package::new("c", "1.0.0") + .feature_dep("manyfeat", "1.0", &["f3"]) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + a = "1.0" + b = "1.0" + c = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── a v1.0.0 +│ └── manyfeat v1.0.0 +│ └── bitflags v1.0.0 +├── b v1.0.0 +│ └── manyfeat v1.0.0 (*) +└── c v1.0.0 + └── manyfeat v1.0.0 (*) +", + ) + .run(); +} + +#[cargo_test] +fn renamed_deps() { + // Handles renamed dependencies. + Package::new("one", "1.0.0").publish(); + Package::new("two", "1.0.0").publish(); + Package::new("bar", "1.0.0").dep("one", "1.0").publish(); + Package::new("bar", "2.0.0").dep("two", "1.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "1.0.0" + + [dependencies] + bar1 = {version = "1.0", package="bar"} + bar2 = {version = "2.0", package="bar"} + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v1.0.0 ([..]/foo) +├── bar v1.0.0 +│ └── one v1.0.0 +└── bar v2.0.0 + └── two v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn source_kinds() { + // Handles git and path sources. + Package::new("regdep", "1.0.0").publish(); + let git_project = git::new("gitdep", |p| { + p.file("Cargo.toml", &basic_manifest("gitdep", "1.0.0")) + .file("src/lib.rs", "") + }); + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + regdep = "1.0" + pathdep = {{ path = "pathdep" }} + gitdep = {{ git = "{}" }} + "#, + git_project.url() + ), + ) + .file("src/lib.rs", "") + .file("pathdep/Cargo.toml", &basic_manifest("pathdep", "1.0.0")) + .file("pathdep/src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── gitdep v1.0.0 (file://[..]/gitdep#[..]) +├── pathdep v1.0.0 ([..]/foo/pathdep) +└── regdep v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn features() { + // Exercises a variety of feature behaviors. 
+ Package::new("optdep_default", "1.0.0").publish(); + Package::new("optdep", "1.0.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [dependencies] + optdep_default = { version = "1.0", optional = true } + optdep = { version = "1.0", optional = true } + + [features] + default = ["optdep_default"] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +a v0.1.0 ([..]/foo) +└── optdep_default v1.0.0 +", + ) + .run(); + + p.cargo("tree --no-default-features") + .with_stdout( + "\ +a v0.1.0 ([..]/foo) +", + ) + .run(); + + p.cargo("tree --all-features") + .with_stdout( + "\ +a v0.1.0 ([..]/foo) +├── optdep v1.0.0 +└── optdep_default v1.0.0 +", + ) + .run(); + + p.cargo("tree --features optdep") + .with_stdout( + "\ +a v0.1.0 ([..]/foo) +├── optdep v1.0.0 +└── optdep_default v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn filters_target() { + // --target flag + Package::new("targetdep", "1.0.0").publish(); + Package::new("hostdep", "1.0.0").publish(); + Package::new("devdep", "1.0.0").publish(); + Package::new("build_target_dep", "1.0.0").publish(); + Package::new("build_host_dep", "1.0.0") + .target_dep("targetdep", "1.0", alternate()) + .target_dep("hostdep", "1.0", &rustc_host()) + .publish(); + Package::new("pm_target", "1.0.0") + .proc_macro(true) + .publish(); + Package::new("pm_host", "1.0.0").proc_macro(true).publish(); + + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "foo" + version = "0.1.0" + + [target.'{alt}'.dependencies] + targetdep = "1.0" + pm_target = "1.0" + + [target.'{host}'.dependencies] + hostdep = "1.0" + pm_host = "1.0" + + [target.'{alt}'.dev-dependencies] + devdep = "1.0" + + [target.'{alt}'.build-dependencies] + build_target_dep = "1.0" + + [target.'{host}'.build-dependencies] + build_host_dep = "1.0" + "#, + alt = alternate(), + host = rustc_host() + ), + ) + .file("src/lib.rs", "") + .file("build.rs", "fn main() {}") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── hostdep v1.0.0 +└── pm_host v1.0.0 +[build-dependencies] +└── build_host_dep v1.0.0 + └── hostdep v1.0.0 +", + ) + .run(); + + p.cargo("tree --target") + .arg(alternate()) + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── pm_target v1.0.0 +└── targetdep v1.0.0 +[build-dependencies] +└── build_host_dep v1.0.0 + └── hostdep v1.0.0 +[dev-dependencies] +└── devdep v1.0.0 +", + ) + .run(); + + p.cargo("tree --target") + .arg(rustc_host()) + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── hostdep v1.0.0 +└── pm_host v1.0.0 +[build-dependencies] +└── build_host_dep v1.0.0 + └── hostdep v1.0.0 +", + ) + .run(); + + p.cargo("tree --target=all") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── hostdep v1.0.0 +├── pm_host v1.0.0 +├── pm_target v1.0.0 +└── targetdep v1.0.0 +[build-dependencies] +├── build_host_dep v1.0.0 +│ ├── hostdep v1.0.0 +│ └── targetdep v1.0.0 +└── build_target_dep v1.0.0 +[dev-dependencies] +└── devdep v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn dep_kinds() { + Package::new("inner-devdep", "1.0.0").publish(); + Package::new("inner-builddep", "1.0.0").publish(); + Package::new("inner-normal", "1.0.0").publish(); + Package::new("normaldep", "1.0.0") + .dep("inner-normal", "1.0") + .dev_dep("inner-devdep", "1.0") + .build_dep("inner-builddep", "1.0") + .publish(); + Package::new("devdep", "1.0.0") + .dep("inner-normal", "1.0") + .dev_dep("inner-devdep", "1.0") + .build_dep("inner-builddep", "1.0") + .publish(); + 
Package::new("builddep", "1.0.0") + .dep("inner-normal", "1.0") + .dev_dep("inner-devdep", "1.0") + .build_dep("inner-builddep", "1.0") + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + normaldep = "1.0" + + [dev-dependencies] + devdep = "1.0" + + [build-dependencies] + builddep = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── normaldep v1.0.0 + └── inner-normal v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +[build-dependencies] +└── builddep v1.0.0 + └── inner-normal v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +[dev-dependencies] +└── devdep v1.0.0 + └── inner-normal v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +", + ) + .run(); + + p.cargo("tree -e no-dev") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── normaldep v1.0.0 + └── inner-normal v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +[build-dependencies] +└── builddep v1.0.0 + └── inner-normal v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +", + ) + .run(); + + p.cargo("tree -e normal") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── normaldep v1.0.0 + └── inner-normal v1.0.0 +", + ) + .run(); + + p.cargo("tree -e dev,build") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +[build-dependencies] +└── builddep v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +[dev-dependencies] +└── devdep v1.0.0 + [build-dependencies] + └── inner-builddep v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn cyclic_dev_dep() { + // Cyclical dev-dependency and inverse flag. + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dev-dependencies] + dev-dep = { path = "dev-dep" } + "#, + ) + .file("src/lib.rs", "") + .file( + "dev-dep/Cargo.toml", + r#" + [package] + name = "dev-dep" + version = "0.1.0" + + [dependencies] + foo = { path=".." } + "#, + ) + .file("dev-dep/src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +[dev-dependencies] +└── dev-dep v0.1.0 ([..]/foo/dev-dep) + └── foo v0.1.0 ([..]/foo) (*) +", + ) + .run(); + + p.cargo("tree --invert foo") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── dev-dep v0.1.0 ([..]/foo/dev-dep) + [dev-dependencies] + └── foo v0.1.0 ([..]/foo) (*) +", + ) + .run(); +} + +#[cargo_test] +fn invert() { + Package::new("b1", "1.0.0").dep("c", "1.0").publish(); + Package::new("b2", "1.0.0").dep("d", "1.0").publish(); + Package::new("c", "1.0.0").publish(); + Package::new("d", "1.0.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + b1 = "1.0" + b2 = "1.0" + c = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── b1 v1.0.0 +│ └── c v1.0.0 +├── b2 v1.0.0 +│ └── d v1.0.0 +└── c v1.0.0 +", + ) + .run(); + + p.cargo("tree --invert c") + .with_stdout( + "\ +c v1.0.0 +├── b1 v1.0.0 +│ └── foo v0.1.0 ([..]/foo) +└── foo v0.1.0 ([..]/foo) +", + ) + .run(); +} + +#[cargo_test] +fn invert_with_build_dep() { + // -i for a common dependency between normal and build deps. 
+ Package::new("common", "1.0.0").publish(); + Package::new("bdep", "1.0.0").dep("common", "1.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + common = "1.0" + + [build-dependencies] + bdep = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── common v1.0.0 +[build-dependencies] +└── bdep v1.0.0 + └── common v1.0.0 +", + ) + .run(); + + p.cargo("tree -i common") + .with_stdout( + "\ +common v1.0.0 +├── bdep v1.0.0 +│ [build-dependencies] +│ └── foo v0.1.0 ([..]/foo) +└── foo v0.1.0 ([..]/foo) +", + ) + .run(); +} + +#[cargo_test] +fn no_indent() { + let p = make_simple_proj(); + + p.cargo("tree --prefix=none") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +a v1.0.0 +b v1.0.0 +c v1.0.0 +c v1.0.0 +bdep v1.0.0 +b v1.0.0 (*) +devdep v1.0.0 +b v1.0.0 (*) +", + ) + .run(); +} + +#[cargo_test] +fn prefix_depth() { + let p = make_simple_proj(); + + p.cargo("tree --prefix=depth") + .with_stdout( + "\ +0foo v0.1.0 ([..]/foo) +1a v1.0.0 +2b v1.0.0 +3c v1.0.0 +1c v1.0.0 +1bdep v1.0.0 +2b v1.0.0 (*) +1devdep v1.0.0 +2b v1.0.0 (*) +", + ) + .run(); +} + +#[cargo_test] +fn no_dedupe() { + let p = make_simple_proj(); + + p.cargo("tree --no-dedupe") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── a v1.0.0 +│ └── b v1.0.0 +│ └── c v1.0.0 +└── c v1.0.0 +[build-dependencies] +└── bdep v1.0.0 + └── b v1.0.0 + └── c v1.0.0 +[dev-dependencies] +└── devdep v1.0.0 + └── b v1.0.0 + └── c v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn no_dedupe_cycle() { + // --no-dedupe with a dependency cycle + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dev-dependencies] + bar = {path = "bar"} + "#, + ) + .file("src/lib.rs", "") + .file( + "bar/Cargo.toml", + r#" + [package] + name = "bar" + version = "0.1.0" + + [dependencies] + foo = {path=".."} + "#, + ) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +[dev-dependencies] +└── bar v0.1.0 ([..]/foo/bar) + └── foo v0.1.0 ([..]/foo) (*) +", + ) + .run(); + + p.cargo("tree --no-dedupe") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +[dev-dependencies] +└── bar v0.1.0 ([..]/foo/bar) + └── foo v0.1.0 ([..]/foo) (*) +", + ) + .run(); +} + +#[cargo_test] +fn duplicates() { + Package::new("dog", "1.0.0").publish(); + Package::new("dog", "2.0.0").publish(); + Package::new("cat", "1.0.0").publish(); + Package::new("cat", "2.0.0").publish(); + Package::new("dep", "1.0.0") + .dep("dog", "1.0") + .dep("cat", "1.0") + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [dependencies] + dog1 = { version = "1.0", package = "dog" } + dog2 = { version = "2.0", package = "dog" } + "#, + ) + .file("a/src/lib.rs", "") + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [dependencies] + dep = "1.0" + cat = "2.0" + "#, + ) + .file("b/src/lib.rs", "") + .build(); + + p.cargo("tree -p a") + .with_stdout( + "\ +a v0.1.0 ([..]/foo/a) +├── dog v1.0.0 +└── dog v2.0.0 +", + ) + .run(); + + p.cargo("tree -p b") + .with_stdout( + "\ +b v0.1.0 ([..]/foo/b) +├── cat v2.0.0 +└── dep v1.0.0 + ├── cat v1.0.0 + └── dog v1.0.0 +", + ) + .run(); + + p.cargo("tree -p a -d") + .with_stdout( + "\ +dog v1.0.0 +└── a v0.1.0 ([..]/foo/a) + +dog v2.0.0 +└── a v0.1.0 ([..]/foo/a) +", + ) + 
.run(); + + p.cargo("tree -p b -d") + .with_stdout( + "\ +cat v1.0.0 +└── dep v1.0.0 + └── b v0.1.0 ([..]/foo/b) + +cat v2.0.0 +└── b v0.1.0 ([..]/foo/b) +", + ) + .run(); +} + +#[cargo_test] +fn charset() { + let p = make_simple_proj(); + p.cargo("tree --charset ascii") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +|-- a v1.0.0 +| `-- b v1.0.0 +| `-- c v1.0.0 +`-- c v1.0.0 +[build-dependencies] +`-- bdep v1.0.0 + `-- b v1.0.0 (*) +[dev-dependencies] +`-- devdep v1.0.0 + `-- b v1.0.0 (*) +", + ) + .run(); +} + +#[cargo_test] +fn format() { + Package::new("dep", "1.0.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + license = "MIT" + repository = "https://github.com/rust-lang/cargo" + + [dependencies] + dep = {version="1.0", optional=true} + + [features] + default = ["foo"] + foo = ["bar"] + bar = [] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree --format <<<{p}>>>") + .with_stdout("<<<foo v0.1.0 ([..]/foo)>>>") + .run(); + + p.cargo("tree --format {}") + .with_stderr( + "\ +[ERROR] tree format `{}` not valid + +Caused by: + unsupported pattern `` +", + ) + .with_status(101) + .run(); + + p.cargo("tree --format {p}-{{hello}}") + .with_stdout("foo v0.1.0 ([..]/foo)-{hello}") + .run(); + + p.cargo("tree --format") + .arg("{p} {l} {r}") + .with_stdout("foo v0.1.0 ([..]/foo) MIT https://github.com/rust-lang/cargo") + .run(); + + p.cargo("tree --format") + .arg("{p} {f}") + .with_stdout("foo v0.1.0 ([..]/foo) bar,default,foo") + .run(); + + p.cargo("tree --all-features --format") + .arg("{p} [{f}]") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) [bar,default,dep,foo] +└── dep v1.0.0 [] +", + ) + .run(); +} + +#[cargo_test] +fn dev_dep_feature() { + // -Zfeatures=dev_dep with optional dep + Package::new("optdep", "1.0.0").publish(); + Package::new("bar", "1.0.0") + .add_dep(Dependency::new("optdep", "1.0").optional(true)) + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dev-dependencies] + bar = { version = "1.0", features = ["optdep"] } + + [dependencies] + bar = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── bar v1.0.0 + └── optdep v1.0.0 +[dev-dependencies] +└── bar v1.0.0 (*) +", + ) + .run(); + + p.cargo("tree -e normal") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── bar v1.0.0 + └── optdep v1.0.0 +", + ) + .run(); + + p.cargo("tree -Zfeatures=dev_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── bar v1.0.0 + └── optdep v1.0.0 +[dev-dependencies] +└── bar v1.0.0 (*) +", + ) + .run(); + + p.cargo("tree -e normal -Zfeatures=dev_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── bar v1.0.0 +", + ) + .run(); +} + +#[cargo_test] +fn host_dep_feature() { + // -Zfeatures=host_dep with optional dep + Package::new("optdep", "1.0.0").publish(); + Package::new("bar", "1.0.0") + .add_dep(Dependency::new("optdep", "1.0").optional(true)) + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [build-dependencies] + bar = { version = "1.0", features = ["optdep"] } + + [dependencies] + bar = "1.0" + "#, + ) + .file("src/lib.rs", "") + .file("build.rs", "fn main() {}") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── bar v1.0.0 + └── optdep v1.0.0 +[build-dependencies] +└── bar v1.0.0 (*) +", + ) + .run(); + + p.cargo("tree 
-Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +└── bar v1.0.0 +[build-dependencies] +└── bar v1.0.0 + └── optdep v1.0.0 +", + ) + .run(); + + // -p + p.cargo("tree -p bar") + .with_stdout( + "\ +bar v1.0.0 +└── optdep v1.0.0 +", + ) + .run(); + + p.cargo("tree -p bar -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +bar v1.0.0 + +bar v1.0.0 +└── optdep v1.0.0 +", + ) + .run(); + + // invert + p.cargo("tree -i optdep") + .with_stdout( + "\ +optdep v1.0.0 +└── bar v1.0.0 + └── foo v0.1.0 ([..]/foo) + [build-dependencies] + └── foo v0.1.0 ([..]/foo) +", + ) + .run(); + + p.cargo("tree -i optdep -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +optdep v1.0.0 +└── bar v1.0.0 + [build-dependencies] + └── foo v0.1.0 ([..]/foo) +", + ) + .run(); + + // Check that -d handles duplicates with features. + p.cargo("tree -d -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +bar v1.0.0 +└── foo v0.1.0 ([..]/foo) + +bar v1.0.0 +[build-dependencies] +└── foo v0.1.0 ([..]/foo) +", + ) + .run(); +} + +#[cargo_test] +fn proc_macro_features() { + // -Zfeatures=host_dep with a proc-macro + Package::new("optdep", "1.0.0").publish(); + Package::new("somedep", "1.0.0") + .add_dep(Dependency::new("optdep", "1.0").optional(true)) + .publish(); + Package::new("pm", "1.0.0") + .proc_macro(true) + .feature_dep("somedep", "1.0", &["optdep"]) + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + pm = "1.0" + somedep = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── pm v1.0.0 +│ └── somedep v1.0.0 +│ └── optdep v1.0.0 +└── somedep v1.0.0 (*) +", + ) + .run(); + + // Note the missing (*) + p.cargo("tree -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +foo v0.1.0 ([..]/foo) +├── pm v1.0.0 +│ └── somedep v1.0.0 +│ └── optdep v1.0.0 +└── somedep v1.0.0 +", + ) + .run(); + + // -p + p.cargo("tree -p somedep") + .with_stdout( + "\ +somedep v1.0.0 +└── optdep v1.0.0 +", + ) + .run(); + + p.cargo("tree -p somedep -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +somedep v1.0.0 + +somedep v1.0.0 +└── optdep v1.0.0 +", + ) + .run(); + + // invert + p.cargo("tree -i somedep") + .with_stdout( + "\ +somedep v1.0.0 +├── foo v0.1.0 ([..]/foo) +└── pm v1.0.0 + └── foo v0.1.0 ([..]/foo) +", + ) + .run(); + + p.cargo("tree -i somedep -Zfeatures=host_dep") + .masquerade_as_nightly_cargo() + .with_stdout( + "\ +somedep v1.0.0 +└── foo v0.1.0 ([..]/foo) + +somedep v1.0.0 +└── pm v1.0.0 + └── foo v0.1.0 ([..]/foo) +", + ) + .run(); +} + +#[cargo_test] +fn itarget_opt_dep() { + // -Zfeatures=itarget with optional dep + Package::new("optdep", "1.0.0").publish(); + Package::new("common", "1.0.0") + .add_dep(Dependency::new("optdep", "1.0").optional(true)) + .publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "1.0.0" + + [dependencies] + common = "1.0" + + [target.'cfg(whatever)'.dependencies] + common = { version = "1.0", features = ["optdep"] } + + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +foo v1.0.0 ([..]/foo) +└── common v1.0.0 + └── optdep v1.0.0 +", + ) + .run(); + + p.cargo("tree -Zfeatures=itarget") + .with_stdout( + "\ +foo v1.0.0 ([..]/foo) +└── common v1.0.0 +", + ) + .masquerade_as_nightly_cargo() + 
.run(); +} + +#[cargo_test] +fn ambiguous_name() { + // -p that is ambiguous. + Package::new("dep", "1.0.0").publish(); + Package::new("dep", "2.0.0").publish(); + Package::new("bar", "1.0.0").dep("dep", "2.0").publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + dep = "1.0" + bar = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("tree -p dep") + .with_stderr_contains( + "\ +error: There are multiple `dep` packages in your project, and the specification `dep` is ambiguous. +Please re-run this command with `-p ` where `` is one of the following: + dep:1.0.0 + dep:2.0.0 +", + ) + .with_status(101) + .run(); +} + +#[cargo_test] +fn workspace_features_are_local() { + // The features for workspace packages should be the same as `cargo build` + // (i.e., the features selected depend on the "current" package). + Package::new("optdep", "1.0.0").publish(); + Package::new("somedep", "1.0.0") + .add_dep(Dependency::new("optdep", "1.0").optional(true)) + .publish(); + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["a", "b"] + "#, + ) + .file( + "a/Cargo.toml", + r#" + [package] + name = "a" + version = "0.1.0" + + [dependencies] + somedep = {version="1.0", features=["optdep"]} + "#, + ) + .file("a/src/lib.rs", "") + .file( + "b/Cargo.toml", + r#" + [package] + name = "b" + version = "0.1.0" + + [dependencies] + somedep = "1.0" + "#, + ) + .file("b/src/lib.rs", "") + .build(); + + p.cargo("tree") + .with_stdout( + "\ +a v0.1.0 ([..]/foo/a) +└── somedep v1.0.0 + └── optdep v1.0.0 + +b v0.1.0 ([..]/foo/b) +└── somedep v1.0.0 (*) +", + ) + .run(); + + p.cargo("tree -p a") + .with_stdout( + "\ +a v0.1.0 ([..]/foo/a) +└── somedep v1.0.0 + └── optdep v1.0.0 +", + ) + .run(); + + p.cargo("tree -p b") + .with_stdout( + "\ +b v0.1.0 ([..]/foo/b) +└── somedep v1.0.0 +", + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/unit_graph.rs cargo-0.47.0/tests/testsuite/unit_graph.rs --- cargo-0.44.1/tests/testsuite/unit_graph.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/unit_graph.rs 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1,215 @@ +//! Tests for --unit-graph option. + +use cargo_test_support::project; +use cargo_test_support::registry::Package; + +#[cargo_test] +fn gated() { + let p = project().file("src/lib.rs", "").build(); + p.cargo("build --unit-graph") + .with_status(101) + .with_stderr( + "\ +[ERROR] the `--unit-graph` flag is unstable[..] +See [..] +See [..] 
+", + ) + .run(); +} + +#[cargo_test] +fn simple() { + Package::new("a", "1.0.0") + .dep("b", "1.0") + .feature("feata", &["b/featb"]) + .publish(); + Package::new("b", "1.0.0") + .dep("c", "1.0") + .feature("featb", &["c/featc"]) + .publish(); + Package::new("c", "1.0.0").feature("featc", &[]).publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + + [dependencies] + a = "1.0" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build --features a/feata --unit-graph -Zunstable-options") + .masquerade_as_nightly_cargo() + .with_json( + r#"{ + "version": 1, + "units": [ + { + "pkg_id": "a 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "target": { + "kind": [ + "lib" + ], + "crate_types": [ + "lib" + ], + "name": "a", + "src_path": "[..]/a-1.0.0/src/lib.rs", + "edition": "2015", + "doctest": true + }, + "profile": { + "name": "dev", + "opt_level": "0", + "lto": "false", + "codegen_units": null, + "debuginfo": 2, + "debug_assertions": true, + "overflow_checks": true, + "rpath": false, + "incremental": false, + "panic": "unwind", + "strip": "none" + }, + "platform": null, + "mode": "build", + "features": [ + "feata" + ], + "dependencies": [ + { + "index": 1, + "extern_crate_name": "b", + "public": false, + "noprelude": false + } + ] + }, + { + "pkg_id": "b 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "target": { + "kind": [ + "lib" + ], + "crate_types": [ + "lib" + ], + "name": "b", + "src_path": "[..]/b-1.0.0/src/lib.rs", + "edition": "2015", + "doctest": true + }, + "profile": { + "name": "dev", + "opt_level": "0", + "lto": "false", + "codegen_units": null, + "debuginfo": 2, + "debug_assertions": true, + "overflow_checks": true, + "rpath": false, + "incremental": false, + "panic": "unwind", + "strip": "none" + }, + "platform": null, + "mode": "build", + "features": [ + "featb" + ], + "dependencies": [ + { + "index": 2, + "extern_crate_name": "c", + "public": false, + "noprelude": false + } + ] + }, + { + "pkg_id": "c 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "target": { + "kind": [ + "lib" + ], + "crate_types": [ + "lib" + ], + "name": "c", + "src_path": "[..]/c-1.0.0/src/lib.rs", + "edition": "2015", + "doctest": true + }, + "profile": { + "name": "dev", + "opt_level": "0", + "lto": "false", + "codegen_units": null, + "debuginfo": 2, + "debug_assertions": true, + "overflow_checks": true, + "rpath": false, + "incremental": false, + "panic": "unwind", + "strip": "none" + }, + "platform": null, + "mode": "build", + "features": [ + "featc" + ], + "dependencies": [] + }, + { + "pkg_id": "foo 0.1.0 (path+file://[..]/foo)", + "target": { + "kind": [ + "lib" + ], + "crate_types": [ + "lib" + ], + "name": "foo", + "src_path": "[..]/foo/src/lib.rs", + "edition": "2015", + "doctest": true + }, + "profile": { + "name": "dev", + "opt_level": "0", + "lto": "false", + "codegen_units": null, + "debuginfo": 2, + "debug_assertions": true, + "overflow_checks": true, + "rpath": false, + "incremental": false, + "panic": "unwind", + "strip": "none" + }, + "platform": null, + "mode": "build", + "features": [], + "dependencies": [ + { + "index": 0, + "extern_crate_name": "a", + "public": false, + "noprelude": false + } + ] + } + ], + "roots": [3] +} +"#, + ) + .run(); +} diff -Nru cargo-0.44.1/tests/testsuite/update.rs cargo-0.47.0/tests/testsuite/update.rs --- cargo-0.44.1/tests/testsuite/update.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/update.rs 2020-07-17 
20:39:39.000000000 +0000 @@ -1,8 +1,5 @@ //! Tests for the `cargo update` command. -use std::fs::File; -use std::io::prelude::*; - use cargo_test_support::registry::Package; use cargo_test_support::{basic_manifest, project}; @@ -42,20 +39,18 @@ p.cargo("build").run(); Package::new("log", "0.1.1").publish(); - File::create(p.root().join("foo/Cargo.toml")) - .unwrap() - .write_all( - br#" - [package] - name = "foo" - version = "0.0.1" - authors = [] - - [dependencies] - log = "0.1.1" - "#, - ) - .unwrap(); + p.change_file( + "foo/Cargo.toml", + r#" + [package] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + log = "0.1.1" + "#, + ); p.cargo("build").run(); } @@ -554,7 +549,8 @@ "workspace_members": [ "bar 0.0.1 (path+file://[..]/foo)" ], - "workspace_root": "[..]/foo" + "workspace_root": "[..]/foo", + "metadata": null }"#, ) .run(); @@ -630,7 +626,7 @@ .build(); p.cargo("build").run(); - let old_lockfile = p.read_file("Cargo.lock"); + let old_lockfile = p.read_lockfile(); Package::new("log", "0.1.1").publish(); Package::new("serde", "0.1.1").dep("log", "0.1").publish(); @@ -644,6 +640,6 @@ ", ) .run(); - let new_lockfile = p.read_file("Cargo.lock"); + let new_lockfile = p.read_lockfile(); assert_eq!(old_lockfile, new_lockfile) } diff -Nru cargo-0.44.1/tests/testsuite/vendor.rs cargo-0.47.0/tests/testsuite/vendor.rs --- cargo-0.44.1/tests/testsuite/vendor.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/vendor.rs 2020-07-17 20:39:39.000000000 +0000 @@ -504,8 +504,8 @@ Caused by: found duplicate version of package `b v0.5.0` vendored from two sources: -source 1: [..] -source 2: [..] + source 1: [..] + source 2: [..] ", ) .with_status(101) diff -Nru cargo-0.44.1/tests/testsuite/workspaces.rs cargo-0.47.0/tests/testsuite/workspaces.rs --- cargo-0.44.1/tests/testsuite/workspaces.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/workspaces.rs 2020-07-17 20:39:39.000000000 +0000 @@ -1,12 +1,9 @@ //! Tests for workspaces. -use std::env; -use std::fs::{self, File}; -use std::io::{Read, Write}; - use cargo_test_support::registry::Package; -use cargo_test_support::{basic_lib_manifest, basic_manifest, git, project}; -use cargo_test_support::{sleep_ms, t}; +use cargo_test_support::{basic_lib_manifest, basic_manifest, git, project, sleep_ms}; +use std::env; +use std::fs; #[cargo_test] fn simple_explicit() { @@ -1037,8 +1034,7 @@ .env("USER", "foo") .with_stderr( "\ -warning: compiling this new crate may not work due to invalid workspace \ -configuration +warning: compiling this new crate may not work due to invalid workspace configuration current package believes it's in a workspace when it's not: current: [..] 
@@ -1065,8 +1061,10 @@ failed to parse manifest at `[..]foo/Cargo.toml` Caused by: - 0: could not parse input as TOML - 1: expected an equals, found eof at line 1 column 5 + could not parse input as TOML + +Caused by: + expected an equals, found eof at line 1 column 5 Created binary (application) `bar` package ", ) @@ -1112,13 +1110,11 @@ p.cargo("build").run(); - let mut lockfile = String::new(); - t!(t!(File::open(p.root().join("Cargo.lock"))).read_to_string(&mut lockfile)); + let lockfile = p.read_lockfile(); p.cargo("build").cwd("baz").run(); - let mut lockfile2 = String::new(); - t!(t!(File::open(p.root().join("Cargo.lock"))).read_to_string(&mut lockfile2)); + let lockfile2 = p.read_lockfile(); assert_eq!(lockfile, lockfile2); } @@ -1167,8 +1163,7 @@ sleep_ms(1000); - t!(t!(File::create(p.root().join("lib/src/lib.rs"))) - .write_all(br#"pub fn foo() -> u32 { 1 }"#)); + p.change_file("lib/src/lib.rs", "pub fn foo() -> u32 { 1 }"); p.cargo("build").cwd("lib").run(); @@ -2122,7 +2117,7 @@ [ERROR] failed to parse manifest at `[..]/foo/Cargo.toml` Caused by: - virtual manifests do not specify {} + this virtual manifest specifies a {} section, which is not allowed ", key )) diff -Nru cargo-0.44.1/tests/testsuite/yank.rs cargo-0.47.0/tests/testsuite/yank.rs --- cargo-0.44.1/tests/testsuite/yank.rs 2020-05-04 02:09:09.000000000 +0000 +++ cargo-0.47.0/tests/testsuite/yank.rs 2020-07-17 20:39:39.000000000 +0000 @@ -4,10 +4,10 @@ use cargo_test_support::paths::CargoPathExt; use cargo_test_support::project; -use cargo_test_support::registry::{self, api_path, registry_url}; +use cargo_test_support::registry; fn setup(name: &str, version: &str) { - let dir = api_path().join(format!("api/v1/crates/{}/{}", name, version)); + let dir = registry::api_path().join(format!("api/v1/crates/{}/{}", name, version)); dir.mkdir_p(); fs::write(dir.join("yank"), r#"{"ok": true}"#).unwrap(); } @@ -32,12 +32,9 @@ .file("src/main.rs", "fn main() {}") .build(); - p.cargo("yank --vers 0.0.1 --index") - .arg(registry_url().to_string()) - .run(); + p.cargo("yank --vers 0.0.1 --token sekrit").run(); - p.cargo("yank --undo --vers 0.0.1 --index") - .arg(registry_url().to_string()) + p.cargo("yank --undo --vers 0.0.1 --token sekrit") .with_status(101) .with_stderr( " Updating `[..]` index diff -Nru cargo-0.44.1/triagebot.toml cargo-0.47.0/triagebot.toml --- cargo-0.44.1/triagebot.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/triagebot.toml 2020-07-17 20:39:39.000000000 +0000 @@ -0,0 +1 @@ +[assign] diff -Nru cargo-0.44.1/vendor/adler/benches/bench.rs cargo-0.47.0/vendor/adler/benches/bench.rs --- cargo-0.44.1/vendor/adler/benches/bench.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/benches/bench.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,109 @@ +extern crate adler; +extern crate criterion; + +use adler::{adler32_slice, Adler32}; +use criterion::{criterion_group, criterion_main, Criterion, Throughput}; + +fn simple(c: &mut Criterion) { + { + const SIZE: usize = 100; + + let mut group = c.benchmark_group("simple-100b"); + group.throughput(Throughput::Bytes(SIZE as u64)); + group.bench_function("zeroes-100", |bencher| { + bencher.iter(|| { + adler32_slice(&[0; SIZE]); + }); + }); + group.bench_function("ones-100", |bencher| { + bencher.iter(|| { + adler32_slice(&[0xff; SIZE]); + }); + }); + } + + { + const SIZE: usize = 1024; + + let mut group = c.benchmark_group("simple-1k"); + group.throughput(Throughput::Bytes(SIZE as u64)); + + group.bench_function("zeroes-1k", |bencher| { + 
bencher.iter(|| { + adler32_slice(&[0; SIZE]); + }); + }); + + group.bench_function("ones-1k", |bencher| { + bencher.iter(|| { + adler32_slice(&[0xff; SIZE]); + }); + }); + } + + { + const SIZE: usize = 1024 * 1024; + + let mut group = c.benchmark_group("simple-1m"); + group.throughput(Throughput::Bytes(SIZE as u64)); + group.bench_function("zeroes-1m", |bencher| { + bencher.iter(|| { + adler32_slice(&[0; SIZE]); + }); + }); + + group.bench_function("ones-1m", |bencher| { + bencher.iter(|| { + adler32_slice(&[0xff; SIZE]); + }); + }); + } +} + +fn chunked(c: &mut Criterion) { + const SIZE: usize = 16 * 1024 * 1024; + + let data = vec![0xAB; SIZE]; + + let mut group = c.benchmark_group("chunked-16m"); + group.throughput(Throughput::Bytes(SIZE as u64)); + group.bench_function("5552", |bencher| { + bencher.iter(|| { + let mut h = Adler32::new(); + for chunk in data.chunks(5552) { + h.write_slice(chunk); + } + h.checksum() + }); + }); + group.bench_function("8k", |bencher| { + bencher.iter(|| { + let mut h = Adler32::new(); + for chunk in data.chunks(8 * 1024) { + h.write_slice(chunk); + } + h.checksum() + }); + }); + group.bench_function("64k", |bencher| { + bencher.iter(|| { + let mut h = Adler32::new(); + for chunk in data.chunks(64 * 1024) { + h.write_slice(chunk); + } + h.checksum() + }); + }); + group.bench_function("1m", |bencher| { + bencher.iter(|| { + let mut h = Adler32::new(); + for chunk in data.chunks(1024 * 1024) { + h.write_slice(chunk); + } + h.checksum() + }); + }); +} + +criterion_group!(benches, simple, chunked); +criterion_main!(benches); diff -Nru cargo-0.44.1/vendor/adler/.cargo-checksum.json cargo-0.47.0/vendor/adler/.cargo-checksum.json --- cargo-0.44.1/vendor/adler/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +{"files":{},"package":"ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/adler/Cargo.toml cargo-0.47.0/vendor/adler/Cargo.toml --- cargo-0.44.1/vendor/adler/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,69 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "adler" +version = "0.2.3" +authors = ["Jonas Schievink "] +description = "A simple clean-room implementation of the Adler-32 checksum" +documentation = "https://docs.rs/adler/" +readme = "README.md" +keywords = ["checksum", "integrity", "hash", "adler32"] +categories = ["algorithms"] +license = "0BSD OR MIT OR Apache-2.0" +repository = "https://github.com/jonas-schievink/adler.git" +[package.metadata.docs.rs] +rustdoc-args = ["--cfg docsrs"] + +[package.metadata.release] +no-dev-version = true +pre-release-commit-message = "Release {{version}}" +tag-message = "{{version}}" + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +replace = "## Unreleased\n\nNo changes.\n\n## [{{version}} - {{date}}](https://github.com/jonas-schievink/adler/releases/tag/v{{version}})\n" +search = "## Unreleased\n" + +[[package.metadata.release.pre-release-replacements]] +file = "README.md" +replace = "adler = \"{{version}}\"" +search = "adler = \"[a-z0-9\\\\.-]+\"" + +[[package.metadata.release.pre-release-replacements]] +file = "src/lib.rs" +replace = "https://docs.rs/adler/{{version}}" +search = "https://docs.rs/adler/[a-z0-9\\.-]+" + +[[bench]] +name = "bench" +harness = false +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" +[dev-dependencies.criterion] +version = "0.3.2" + +[features] +default = ["std"] +rustc-dep-of-std = ["core", "compiler_builtins"] +std = [] +[badges.maintenance] +status = "actively-developed" + +[badges.travis-ci] +repository = "jonas-schievink/adler" diff -Nru cargo-0.44.1/vendor/adler/CHANGELOG.md cargo-0.47.0/vendor/adler/CHANGELOG.md --- cargo-0.44.1/vendor/adler/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,33 @@ +# Changelog + +## Unreleased + +No changes. + +## [0.2.3 - 2020-07-11](https://github.com/jonas-schievink/adler/releases/tag/v0.2.3) + +- Process 4 Bytes at a time, improving performance by up to 50% ([#2]). + +## [0.2.2 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.2) + +- Bump MSRV to 1.31.0. + +## [0.2.1 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.1) + +- Add a few `#[inline]` annotations to small functions. +- Fix CI badge. +- Allow integration into libstd. + +## [0.2.0 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.0) + +- Support `#![no_std]` when using `default-features = false`. +- Improve performance by around 7x. +- Support Rust 1.8.0. +- Improve API naming. + +## [0.1.0 - 2020-06-26](https://github.com/jonas-schievink/adler/releases/tag/v0.1.0) + +Initial release. + + +[#2]: https://github.com/jonas-schievink/adler/pull/2 diff -Nru cargo-0.44.1/vendor/adler/LICENSE-0BSD cargo-0.47.0/vendor/adler/LICENSE-0BSD --- cargo-0.44.1/vendor/adler/LICENSE-0BSD 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/LICENSE-0BSD 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,12 @@ +Copyright (C) Jonas Schievink + +Permission to use, copy, modify, and/or distribute this software for +any purpose with or without fee is hereby granted. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN +AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff -Nru cargo-0.44.1/vendor/adler/LICENSE-APACHE cargo-0.47.0/vendor/adler/LICENSE-APACHE --- cargo-0.44.1/vendor/adler/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/LICENSE-APACHE 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/LICENSE-2.0 + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.44.1/vendor/adler/LICENSE-MIT cargo-0.47.0/vendor/adler/LICENSE-MIT --- cargo-0.44.1/vendor/adler/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/LICENSE-MIT 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/adler/README.md cargo-0.47.0/vendor/adler/README.md --- cargo-0.44.1/vendor/adler/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,38 @@ +# Adler-32 checksums for Rust + +[![crates.io](https://img.shields.io/crates/v/adler.svg)](https://crates.io/crates/adler) +[![docs.rs](https://docs.rs/adler/badge.svg)](https://docs.rs/adler/) +![CI](https://github.com/jonas-schievink/adler/workflows/CI/badge.svg) + +This crate provides a simple implementation of the Adler-32 checksum, used in +zlib, rsync, and other software. + +Please refer to the [changelog](CHANGELOG.md) to see what changed in the last +releases. + +## Features + +- Permissively licensed (0BSD) clean-room implementation. +- Zero dependencies. +- Decent performance (3-4 GB/s). +- Supports `#![no_std]` (with `default-features = false`). 
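As a minimal sketch of the API summarized in the feature list above (using only the `adler32_slice`, `Adler32::new`, `write_slice`, and `checksum` items that appear in the vendored `src/lib.rs` later in this diff; the expected value is the one from the crate's own `wiki()` test), the one-shot and incremental entry points can be exercised like this:

```rust
// Minimal sketch of the vendored `adler` API: one-shot vs. incremental use.
use adler::{adler32_slice, Adler32};

fn main() {
    // One-shot checksum of a byte slice.
    assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);

    // Feeding the same bytes incrementally yields the same checksum.
    let mut hasher = Adler32::new();
    hasher.write_slice(b"Wiki");
    hasher.write_slice(b"pedia");
    assert_eq!(hasher.checksum(), 0x11E60398);
}
```

The `## Usage` section that follows shows the corresponding `Cargo.toml` entry.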
+ +## Usage + +Add an entry to your `Cargo.toml`: + +```toml +[dependencies] +adler = "0.2.3" +``` + +Check the [API Documentation](https://docs.rs/adler/) for how to use the +crate's functionality. + +## Rust version support + +Currently, this crate supports all Rust versions starting at Rust 1.31.0. + +Bumping the Minimum Supported Rust Version (MSRV) is *not* considered a breaking +change, but will not be done without good reasons. The latest 3 stable Rust +versions will always be supported no matter what. diff -Nru cargo-0.44.1/vendor/adler/RELEASE_PROCESS.md cargo-0.47.0/vendor/adler/RELEASE_PROCESS.md --- cargo-0.44.1/vendor/adler/RELEASE_PROCESS.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/RELEASE_PROCESS.md 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,13 @@ +# What to do to publish a new release + +1. Ensure all notable changes are in the changelog under "Unreleased". + +2. Execute `cargo release ` to bump version(s), tag and publish + everything. External subcommand, must be installed with `cargo install + cargo-release`. + + `` can be one of `major|minor|patch`. If this is the first release + (`0.1.0`), use `minor`, since the version starts out as `0.0.0`. + +3. Go to the GitHub releases, edit the just-pushed tag. Copy the release notes + from the changelog. diff -Nru cargo-0.44.1/vendor/adler/src/algo.rs cargo-0.47.0/vendor/adler/src/algo.rs --- cargo-0.44.1/vendor/adler/src/algo.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/src/algo.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,146 @@ +use crate::Adler32; +use std::ops::{AddAssign, MulAssign, RemAssign}; + +impl Adler32 { + pub(crate) fn compute(&mut self, bytes: &[u8]) { + // The basic algorithm is, for every byte: + // a = (a + byte) % MOD + // b = (b + a) % MOD + // where MOD = 65521. + // + // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows: + // - Between calls to `write`, we ensure that a and b are always in range 0..MOD. + // - We use 32-bit arithmetic in this function. + // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD` + // operation. + // + // According to Wikipedia, b is calculated as follows for non-incremental checksumming: + // b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521) + // Where n is the number of bytes and Di is the i-th Byte. We need to change this to account + // for the previous values of a and b, as well as treat every input Byte as being 255: + // b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520 + // Or in other words: + // b_inc = n*65520 + n(n+1)/2*255 + // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521. + // 2^32-65521 = n*65520 + n(n+1)/2*255 + // Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552. + // + // On top of the optimization outlined above, the algorithm can also be parallelized with a + // bit more work: + // + // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn). 
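The chunk-size bound derived in the comment above (b_inc = n*65520 + n*(n+1)/2*255, which must not exceed 2^32 - 65521 for the deferred modulo to remain correct) can be double-checked numerically; the sketch below only restates that comment's arithmetic and is not part of the vendored source:

```rust
// Check of the chunk-size bound quoted in the comment above: over an n-byte
// chunk, b can grow by at most n*65520 + n*(n+1)/2*255, and that growth must
// not exceed 2^32 - 65521 for the deferred `% 65521` reductions to be valid.
fn main() {
    let limit: u64 = (1u64 << 32) - 65521;
    let b_inc = |n: u64| n * 65520 + n * (n + 1) / 2 * 255;

    // 5552 is the largest chunk length that still fits under the limit.
    assert!(b_inc(5552) <= limit);
    assert!(b_inc(5553) > limit);
}
```

The same constant appears as `NMAX = 5552` in the older `adler32` crate that is removed later in this diff.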
+ // + // If we fix some value k Self { + U32X4([ + u32::from(bytes[0]), + u32::from(bytes[1]), + u32::from(bytes[2]), + u32::from(bytes[3]), + ]) + } +} + +impl AddAssign for U32X4 { + fn add_assign(&mut self, other: Self) { + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s += o; + } + } +} + +impl RemAssign for U32X4 { + fn rem_assign(&mut self, quotient: u32) { + for s in self.0.iter_mut() { + *s %= quotient; + } + } +} + +impl MulAssign for U32X4 { + fn mul_assign(&mut self, rhs: u32) { + for s in self.0.iter_mut() { + *s *= rhs; + } + } +} diff -Nru cargo-0.44.1/vendor/adler/src/lib.rs cargo-0.47.0/vendor/adler/src/lib.rs --- cargo-0.44.1/vendor/adler/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/adler/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,215 @@ +//! Adler-32 checksum implementation. +//! +//! This implementation features: +//! +//! - Permissively licensed (0BSD) clean-room implementation. +//! - Zero dependencies. +//! - Decent performance (3-4 GB/s). +//! - `#![no_std]` support (with `default-features = false`). + +#![doc(html_root_url = "https://docs.rs/adler/0.2.3")] +// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default +#![doc(test(attr(deny(unused_imports, unused_must_use))))] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![warn(missing_debug_implementations)] +#![forbid(unsafe_code)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate core as std; + +mod algo; + +use std::hash::Hasher; + +#[cfg(feature = "std")] +use std::io::{self, BufRead}; + +/// Adler-32 checksum calculator. +/// +/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default +/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via +/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`]. +/// +/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any +/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although +/// that is not recommended (while every checksum is a hash, they are not necessarily good at being +/// one). +/// +/// [`new`]: #method.new +/// [`from_checksum`]: #method.from_checksum +/// [`checksum`]: #method.checksum +#[derive(Debug, Copy, Clone)] +pub struct Adler32 { + a: u16, + b: u16, +} + +impl Adler32 { + /// Creates a new Adler-32 instance with default state. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Creates an `Adler32` instance from a precomputed Adler-32 checksum. + /// + /// This allows resuming checksum calculation without having to keep the `Adler32` instance + /// around. + /// + /// # Example + /// + /// ``` + /// # use adler::Adler32; + /// let parts = [ + /// "rust", + /// "acean", + /// ]; + /// let whole = adler::adler32_slice(b"rustacean"); + /// + /// let mut sum = Adler32::new(); + /// sum.write_slice(parts[0].as_bytes()); + /// let partial = sum.checksum(); + /// + /// // ...later + /// + /// let mut sum = Adler32::from_checksum(partial); + /// sum.write_slice(parts[1].as_bytes()); + /// assert_eq!(sum.checksum(), whole); + /// ``` + #[inline] + pub fn from_checksum(sum: u32) -> Self { + Adler32 { + a: sum as u16, + b: (sum >> 16) as u16, + } + } + + /// Returns the calculated checksum at this point in time. 
+ #[inline] + pub fn checksum(&self) -> u32 { + (u32::from(self.b) << 16) | u32::from(self.a) + } + + /// Adds `bytes` to the checksum calculation. + /// + /// If efficiency matters, this should be called with Byte slices that contain at least a few + /// thousand Bytes. + pub fn write_slice(&mut self, bytes: &[u8]) { + self.compute(bytes); + } +} + +impl Default for Adler32 { + #[inline] + fn default() -> Self { + Adler32 { a: 1, b: 0 } + } +} + +impl Hasher for Adler32 { + #[inline] + fn finish(&self) -> u64 { + u64::from(self.checksum()) + } + + fn write(&mut self, bytes: &[u8]) { + self.write_slice(bytes); + } +} + +/// Calculates the Adler-32 checksum of a byte slice. +pub fn adler32_slice(data: &[u8]) -> u32 { + let mut h = Adler32::new(); + h.write_slice(data); + h.checksum() +} + +/// Calculates the Adler-32 checksum of a `BufRead`'s contents. +/// +/// The passed `BufRead` implementor will be read until it reaches EOF. +/// +/// If you only have a `Read` implementor, wrap it in `std::io::BufReader`. +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +pub fn adler32_reader(reader: &mut R) -> io::Result { + let mut h = Adler32::new(); + loop { + let len = { + let buf = reader.fill_buf()?; + if buf.is_empty() { + return Ok(h.checksum()); + } + + h.write_slice(buf); + buf.len() + }; + reader.consume(len); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::BufReader; + + #[test] + fn zeroes() { + assert_eq!(adler32_slice(&[]), 1); + assert_eq!(adler32_slice(&[0]), 1 | 1 << 16); + assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16); + assert_eq!(adler32_slice(&[0; 100]), 0x00640001); + assert_eq!(adler32_slice(&[0; 1024]), 0x04000001); + assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001); + } + + #[test] + fn ones() { + assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e); + assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11); + } + + #[test] + fn mixed() { + assert_eq!(adler32_slice(&[1]), 2 | 2 << 16); + assert_eq!(adler32_slice(&[40]), 41 | 41 << 16); + + assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1); + } + + /// Example calculation from https://en.wikipedia.org/wiki/Adler-32. + #[test] + fn wiki() { + assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398); + } + + #[test] + fn resume() { + let mut adler = Adler32::new(); + adler.write_slice(&[0xff; 1024]); + let partial = adler.checksum(); + assert_eq!(partial, 0x79a6fc2e); // from above + adler.write_slice(&[0xff; 1024 * 1024 - 1024]); + assert_eq!(adler.checksum(), 0x8e88ef11); // from above + + // Make sure that we can resume computing from the partial checksum via `from_checksum`. + let mut adler = Adler32::from_checksum(partial); + adler.write_slice(&[0xff; 1024 * 1024 - 1024]); + assert_eq!(adler.checksum(), 0x8e88ef11); // from above + } + + #[test] + fn bufread() { + fn test(data: &[u8], checksum: u32) { + // `BufReader` uses an 8 KB buffer, so this will test buffer refilling. 
+ let mut buf = BufReader::new(data); + let real_sum = adler32_reader(&mut buf).unwrap(); + assert_eq!(checksum, real_sum); + } + + test(&[], 1); + test(&[0; 1024], 0x04000001); + test(&[0; 1024 * 1024], 0x00f00001); + test(&[0xA5; 1024 * 1024], 0xd5009ab1); + } +} diff -Nru cargo-0.44.1/vendor/adler32/appveyor.yml cargo-0.47.0/vendor/adler32/appveyor.yml --- cargo-0.44.1/vendor/adler32/appveyor.yml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -install: - - ps: Start-FileDownload 'https://static.rust-lang.org/dist/rust-nightly-i686-pc-windows-gnu.exe' - - rust-nightly-i686-pc-windows-gnu.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo build --verbose - - cargo test --verbose diff -Nru cargo-0.44.1/vendor/adler32/.cargo-checksum.json cargo-0.47.0/vendor/adler32/.cargo-checksum.json --- cargo-0.44.1/vendor/adler32/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{},"package":"5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/adler32/Cargo.toml cargo-0.47.0/vendor/adler32/Cargo.toml --- cargo-0.44.1/vendor/adler32/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "adler32" -version = "1.0.4" -authors = ["Remi Rampin "] -description = "Minimal Adler32 implementation for Rust." 
-documentation = "https://remram44.github.io/adler32-rs/index.html" -readme = "README.md" -keywords = ["adler32", "hash", "rolling"] -license = "Zlib" -repository = "https://github.com/remram44/adler32-rs" -[dev-dependencies.rand] -version = "0.7" diff -Nru cargo-0.44.1/vendor/adler32/debian/patches/relax-test-dep.diff cargo-0.47.0/vendor/adler32/debian/patches/relax-test-dep.diff --- cargo-0.44.1/vendor/adler32/debian/patches/relax-test-dep.diff 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/debian/patches/relax-test-dep.diff 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -Index: adler32/Cargo.toml -=================================================================== ---- adler32.orig/Cargo.toml -+++ adler32/Cargo.toml -@@ -21,4 +21,4 @@ keywords = ["adler32", "hash", "rolling" - license = "Zlib" - repository = "https://github.com/remram44/adler32-rs" - [dev-dependencies.rand] --version = "0.4" -+version = "0.7" -Index: adler32/src/lib.rs -=================================================================== ---- adler32.orig/src/lib.rs -+++ adler32/src/lib.rs -@@ -252,7 +252,7 @@ mod test { - let mut data = vec![0u8; 5589]; - for size in [0, 1, 3, 4, 5, 31, 32, 33, 67, - 5550, 5552, 5553, 5568, 5584, 5589].iter().cloned() { -- rng.fill_bytes(&mut data[..size]); -+ rng.fill(&mut data[..size]); - let r1 = io::Cursor::new(&data[..size]); - let r2 = r1.clone(); - if adler32_slow(r1).unwrap() != adler32(r2).unwrap() { diff -Nru cargo-0.44.1/vendor/adler32/debian/patches/series cargo-0.47.0/vendor/adler32/debian/patches/series --- cargo-0.44.1/vendor/adler32/debian/patches/series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -relax-test-dep.diff diff -Nru cargo-0.44.1/vendor/adler32/LICENSE cargo-0.47.0/vendor/adler32/LICENSE --- cargo-0.44.1/vendor/adler32/LICENSE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -Copyright notice for the Rust port: - - (C) 2016 Remi Rampin - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - -Copyright notice for the original C code from the zlib project: - - (C) 1995-2017 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. 
The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu diff -Nru cargo-0.44.1/vendor/adler32/.pc/applied-patches cargo-0.47.0/vendor/adler32/.pc/applied-patches --- cargo-0.44.1/vendor/adler32/.pc/applied-patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.pc/applied-patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -relax-test-dep.diff diff -Nru cargo-0.44.1/vendor/adler32/.pc/.quilt_patches cargo-0.47.0/vendor/adler32/.pc/.quilt_patches --- cargo-0.44.1/vendor/adler32/.pc/.quilt_patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.pc/.quilt_patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -debian/patches diff -Nru cargo-0.44.1/vendor/adler32/.pc/.quilt_series cargo-0.47.0/vendor/adler32/.pc/.quilt_series --- cargo-0.44.1/vendor/adler32/.pc/.quilt_series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.pc/.quilt_series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -series diff -Nru cargo-0.44.1/vendor/adler32/.pc/relax-test-dep.diff/Cargo.toml cargo-0.47.0/vendor/adler32/.pc/relax-test-dep.diff/Cargo.toml --- cargo-0.44.1/vendor/adler32/.pc/relax-test-dep.diff/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.pc/relax-test-dep.diff/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "adler32" -version = "1.0.4" -authors = ["Remi Rampin "] -description = "Minimal Adler32 implementation for Rust." -documentation = "https://remram44.github.io/adler32-rs/index.html" -readme = "README.md" -keywords = ["adler32", "hash", "rolling"] -license = "Zlib" -repository = "https://github.com/remram44/adler32-rs" -[dev-dependencies.rand] -version = "0.4" diff -Nru cargo-0.44.1/vendor/adler32/.pc/relax-test-dep.diff/src/lib.rs cargo-0.47.0/vendor/adler32/.pc/relax-test-dep.diff/src/lib.rs --- cargo-0.44.1/vendor/adler32/.pc/relax-test-dep.diff/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.pc/relax-test-dep.diff/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,307 +0,0 @@ -//! A minimal implementation of Adler32 for Rust. -//! -//! This provides the simple method adler32(), that exhausts a Read and -//! computes the Adler32 hash, as well as the RollingAdler32 struct, that can -//! build a hash byte-by-byte, allowing to 'forget' past bytes in a rolling -//! fashion. -//! -//! The adler32 code has been translated (as accurately as I could manage) from -//! the zlib implementation. 
- -#[cfg(test)] -extern crate rand; - -use std::io; - -// adler32 algorithm and implementation taken from zlib; http://www.zlib.net/ -// It was translated into Rust as accurately as I could manage -// The (slow) reference was taken from Wikipedia; https://en.wikipedia.org/ - -/* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.8, April 28th, 2013 - - Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - -*/ - -// largest prime smaller than 65536 -const BASE: u32 = 65521; - -// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 -const NMAX: usize = 5552; - -#[inline(always)] -fn do1(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - *adler += u32::from(buf[0]); - *sum2 += *adler; -} - -#[inline(always)] -fn do2(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do1(adler, sum2, &buf[0..1]); - do1(adler, sum2, &buf[1..2]); -} - -#[inline(always)] -fn do4(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do2(adler, sum2, &buf[0..2]); - do2(adler, sum2, &buf[2..4]); -} - -#[inline(always)] -fn do8(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do4(adler, sum2, &buf[0..4]); - do4(adler, sum2, &buf[4..8]); -} - -#[inline(always)] -fn do16(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do8(adler, sum2, &buf[0..8]); - do8(adler, sum2, &buf[8..16]); -} - -/// A rolling version of the Adler32 hash, which can 'forget' past bytes. -/// -/// Calling remove() will update the hash to the value it would have if that -/// past byte had never been fed to the algorithm. This allows you to get the -/// hash of a rolling window very efficiently. -pub struct RollingAdler32 { - a: u32, - b: u32, -} - -impl Default for RollingAdler32 { - fn default() -> RollingAdler32 { - RollingAdler32::new() - } -} - -impl RollingAdler32 { - /// Creates an empty Adler32 context (with hash 1). - pub fn new() -> RollingAdler32 { - Self::from_value(1) - } - - /// Creates an Adler32 context with the given initial value. - pub fn from_value(adler32: u32) -> RollingAdler32 { - let a = adler32 & 0xFFFF; - let b = adler32 >> 16; - RollingAdler32 { a, b } - } - - /// Convenience function initializing a context from the hash of a buffer. - pub fn from_buffer(buffer: &[u8]) -> RollingAdler32 { - let mut hash = RollingAdler32::new(); - hash.update_buffer(buffer); - hash - } - - /// Returns the current hash. - pub fn hash(&self) -> u32 { - (self.b << 16) | self.a - } - - /// Removes the given `byte` that was fed to the algorithm `size` bytes ago. 
- pub fn remove(&mut self, size: usize, byte: u8) { - let byte = u32::from(byte); - self.a = (self.a + BASE - byte) % BASE; - self.b = ((self.b + BASE - 1) - .wrapping_add(BASE.wrapping_sub(size as u32) - .wrapping_mul(byte))) % BASE; - } - - /// Feeds a new `byte` to the algorithm to update the hash. - pub fn update(&mut self, byte: u8) { - let byte = u32::from(byte); - self.a = (self.a + byte) % BASE; - self.b = (self.b + self.a) % BASE; - } - - /// Feeds a vector of bytes to the algorithm to update the hash. - pub fn update_buffer(&mut self, buffer: &[u8]) { - let len = buffer.len(); - - // in case user likes doing a byte at a time, keep it fast - if len == 1 { - self.update(buffer[0]); - return; - } - - // in case short lengths are provided, keep it somewhat fast - if len < 16 { - for byte in buffer.iter().take(len) { - self.a += u32::from(*byte); - self.b += self.a; - } - if self.a >= BASE { - self.a -= BASE; - } - self.b %= BASE; - return; - } - - let mut pos = 0; - - // do length NMAX blocks -- requires just one modulo operation; - while pos + NMAX <= len { - let end = pos + NMAX; - while pos < end { - // 16 sums unrolled - do16(&mut self.a, &mut self.b, &buffer[pos..pos + 16]); - pos += 16; - } - self.a %= BASE; - self.b %= BASE; - } - - // do remaining bytes (less than NMAX, still just one modulo) - if pos < len { // avoid modulos if none remaining - while len - pos >= 16 { - do16(&mut self.a, &mut self.b, &buffer[pos..pos + 16]); - pos += 16; - } - while len - pos > 0 { - self.a += u32::from(buffer[pos]); - self.b += self.a; - pos += 1; - } - self.a %= BASE; - self.b %= BASE; - } - } -} - -/// Consume a Read object and returns the Adler32 hash. -pub fn adler32(mut reader: R) -> io::Result { - let mut hash = RollingAdler32::new(); - let mut buffer = [0u8; NMAX]; - let mut read = try!(reader.read(&mut buffer)); - while read > 0 { - hash.update_buffer(&buffer[..read]); - read = try!(reader.read(&mut buffer)); - } - Ok(hash.hash()) -} - -#[cfg(test)] -mod test { - use rand; - use rand::Rng; - use std::io; - - use super::{BASE, adler32, RollingAdler32}; - - fn adler32_slow(reader: R) -> io::Result { - let mut a: u32 = 1; - let mut b: u32 = 0; - - for byte in reader.bytes() { - let byte = try!(byte) as u32; - a = (a + byte) % BASE; - b = (b + a) % BASE; - } - - Ok((b << 16) | a) - } - - #[test] - fn testvectors() { - fn do_test(v: u32, bytes: &[u8]) { - let mut hash = RollingAdler32::new(); - hash.update_buffer(&bytes); - assert_eq!(hash.hash(), v); - - let r = io::Cursor::new(bytes); - assert_eq!(adler32(r).unwrap(), v); - } - do_test(0x00000001, b""); - do_test(0x00620062, b"a"); - do_test(0x024d0127, b"abc"); - do_test(0x29750586, b"message digest"); - do_test(0x90860b20, b"abcdefghijklmnopqrstuvwxyz"); - do_test(0x8adb150c, b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ - abcdefghijklmnopqrstuvwxyz\ - 0123456789"); - do_test(0x97b61069, b"1234567890123456789012345678901234567890\ - 1234567890123456789012345678901234567890"); - do_test(0xD6251498, &[255; 64000]); - } - - #[test] - fn compare() { - let mut rng = rand::thread_rng(); - let mut data = vec![0u8; 5589]; - for size in [0, 1, 3, 4, 5, 31, 32, 33, 67, - 5550, 5552, 5553, 5568, 5584, 5589].iter().cloned() { - rng.fill_bytes(&mut data[..size]); - let r1 = io::Cursor::new(&data[..size]); - let r2 = r1.clone(); - if adler32_slow(r1).unwrap() != adler32(r2).unwrap() { - panic!("Comparison failed, size={}", size); - } - } - } - - #[test] - fn rolling() { - assert_eq!(RollingAdler32::from_value(0x01020304).hash(), 0x01020304); - - fn do_test(a: 
&[u8], b: &[u8]) { - let mut total = Vec::with_capacity(a.len() + b.len()); - total.extend(a); - total.extend(b); - let mut h = RollingAdler32::from_buffer(&total[..(b.len())]); - for i in 0..(a.len()) { - h.remove(b.len(), a[i]); - h.update(total[b.len() + i]); - } - assert_eq!(h.hash(), adler32(b).unwrap()); - } - do_test(b"a", b"b"); - do_test(b"", b"this a test"); - do_test(b"th", b"is a test"); - do_test(b"this a ", b"test"); - } - - #[test] - fn long_window_remove() { - let mut hash = RollingAdler32::new(); - let w = 65536; - assert!(w as u32 > BASE); - - let mut bytes = vec![0; w*3]; - for (i, b) in bytes.iter_mut().enumerate() { - *b = i as u8; - } - - for (i, b) in bytes.iter().enumerate() { - if i >= w { - hash.remove(w, bytes[i - w]); - } - hash.update(*b); - if i > 0 && i % w == 0 { - assert_eq!(hash.hash(), 0x433a8772); - } - } - assert_eq!(hash.hash(), 0xbbba8772); - } -} diff -Nru cargo-0.44.1/vendor/adler32/.pc/.version cargo-0.47.0/vendor/adler32/.pc/.version --- cargo-0.44.1/vendor/adler32/.pc/.version 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/.pc/.version 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2 diff -Nru cargo-0.44.1/vendor/adler32/README.md cargo-0.47.0/vendor/adler32/README.md --- cargo-0.44.1/vendor/adler32/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[![Build Status](https://travis-ci.org/remram44/adler32-rs.svg?branch=master)](https://travis-ci.org/remram44/adler32-rs/builds) -[![Win Build](https://ci.appveyor.com/api/projects/status/ekyg20rd6rwrus64/branch/master?svg=true)](https://ci.appveyor.com/project/remram44/adler32-rs) -[![Crates.io](https://img.shields.io/crates/v/adler32.svg)](https://crates.io/crates/adler32) -[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/remram44) - -What is this? -============= - -It is an implementation of the [Adler32 rolling hash algorithm](https://en.wikipedia.org/wiki/Adler-32) in the [Rust programming language](https://www.rust-lang.org/). - -It is adapted from Jean-Loup Gailly's and Mark Adler's [original implementation in zlib](https://github.com/madler/zlib/blob/2fa463bacfff79181df1a5270fb67cc679a53e71/adler32.c). A copy of the zlib copyright and license can be found in LICENSE-ZLIB. - -[Generated documentation](https://remram44.github.io/adler32-rs/index.html) diff -Nru cargo-0.44.1/vendor/adler32/src/lib.rs cargo-0.47.0/vendor/adler32/src/lib.rs --- cargo-0.44.1/vendor/adler32/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/adler32/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,307 +0,0 @@ -//! A minimal implementation of Adler32 for Rust. -//! -//! This provides the simple method adler32(), that exhausts a Read and -//! computes the Adler32 hash, as well as the RollingAdler32 struct, that can -//! build a hash byte-by-byte, allowing to 'forget' past bytes in a rolling -//! fashion. -//! -//! The adler32 code has been translated (as accurately as I could manage) from -//! the zlib implementation. 
- -#[cfg(test)] -extern crate rand; - -use std::io; - -// adler32 algorithm and implementation taken from zlib; http://www.zlib.net/ -// It was translated into Rust as accurately as I could manage -// The (slow) reference was taken from Wikipedia; https://en.wikipedia.org/ - -/* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.8, April 28th, 2013 - - Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - -*/ - -// largest prime smaller than 65536 -const BASE: u32 = 65521; - -// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 -const NMAX: usize = 5552; - -#[inline(always)] -fn do1(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - *adler += u32::from(buf[0]); - *sum2 += *adler; -} - -#[inline(always)] -fn do2(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do1(adler, sum2, &buf[0..1]); - do1(adler, sum2, &buf[1..2]); -} - -#[inline(always)] -fn do4(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do2(adler, sum2, &buf[0..2]); - do2(adler, sum2, &buf[2..4]); -} - -#[inline(always)] -fn do8(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do4(adler, sum2, &buf[0..4]); - do4(adler, sum2, &buf[4..8]); -} - -#[inline(always)] -fn do16(adler: &mut u32, sum2: &mut u32, buf: &[u8]) { - do8(adler, sum2, &buf[0..8]); - do8(adler, sum2, &buf[8..16]); -} - -/// A rolling version of the Adler32 hash, which can 'forget' past bytes. -/// -/// Calling remove() will update the hash to the value it would have if that -/// past byte had never been fed to the algorithm. This allows you to get the -/// hash of a rolling window very efficiently. -pub struct RollingAdler32 { - a: u32, - b: u32, -} - -impl Default for RollingAdler32 { - fn default() -> RollingAdler32 { - RollingAdler32::new() - } -} - -impl RollingAdler32 { - /// Creates an empty Adler32 context (with hash 1). - pub fn new() -> RollingAdler32 { - Self::from_value(1) - } - - /// Creates an Adler32 context with the given initial value. - pub fn from_value(adler32: u32) -> RollingAdler32 { - let a = adler32 & 0xFFFF; - let b = adler32 >> 16; - RollingAdler32 { a, b } - } - - /// Convenience function initializing a context from the hash of a buffer. - pub fn from_buffer(buffer: &[u8]) -> RollingAdler32 { - let mut hash = RollingAdler32::new(); - hash.update_buffer(buffer); - hash - } - - /// Returns the current hash. - pub fn hash(&self) -> u32 { - (self.b << 16) | self.a - } - - /// Removes the given `byte` that was fed to the algorithm `size` bytes ago. 
- pub fn remove(&mut self, size: usize, byte: u8) { - let byte = u32::from(byte); - self.a = (self.a + BASE - byte) % BASE; - self.b = ((self.b + BASE - 1) - .wrapping_add(BASE.wrapping_sub(size as u32) - .wrapping_mul(byte))) % BASE; - } - - /// Feeds a new `byte` to the algorithm to update the hash. - pub fn update(&mut self, byte: u8) { - let byte = u32::from(byte); - self.a = (self.a + byte) % BASE; - self.b = (self.b + self.a) % BASE; - } - - /// Feeds a vector of bytes to the algorithm to update the hash. - pub fn update_buffer(&mut self, buffer: &[u8]) { - let len = buffer.len(); - - // in case user likes doing a byte at a time, keep it fast - if len == 1 { - self.update(buffer[0]); - return; - } - - // in case short lengths are provided, keep it somewhat fast - if len < 16 { - for byte in buffer.iter().take(len) { - self.a += u32::from(*byte); - self.b += self.a; - } - if self.a >= BASE { - self.a -= BASE; - } - self.b %= BASE; - return; - } - - let mut pos = 0; - - // do length NMAX blocks -- requires just one modulo operation; - while pos + NMAX <= len { - let end = pos + NMAX; - while pos < end { - // 16 sums unrolled - do16(&mut self.a, &mut self.b, &buffer[pos..pos + 16]); - pos += 16; - } - self.a %= BASE; - self.b %= BASE; - } - - // do remaining bytes (less than NMAX, still just one modulo) - if pos < len { // avoid modulos if none remaining - while len - pos >= 16 { - do16(&mut self.a, &mut self.b, &buffer[pos..pos + 16]); - pos += 16; - } - while len - pos > 0 { - self.a += u32::from(buffer[pos]); - self.b += self.a; - pos += 1; - } - self.a %= BASE; - self.b %= BASE; - } - } -} - -/// Consume a Read object and returns the Adler32 hash. -pub fn adler32(mut reader: R) -> io::Result { - let mut hash = RollingAdler32::new(); - let mut buffer = [0u8; NMAX]; - let mut read = try!(reader.read(&mut buffer)); - while read > 0 { - hash.update_buffer(&buffer[..read]); - read = try!(reader.read(&mut buffer)); - } - Ok(hash.hash()) -} - -#[cfg(test)] -mod test { - use rand; - use rand::Rng; - use std::io; - - use super::{BASE, adler32, RollingAdler32}; - - fn adler32_slow(reader: R) -> io::Result { - let mut a: u32 = 1; - let mut b: u32 = 0; - - for byte in reader.bytes() { - let byte = try!(byte) as u32; - a = (a + byte) % BASE; - b = (b + a) % BASE; - } - - Ok((b << 16) | a) - } - - #[test] - fn testvectors() { - fn do_test(v: u32, bytes: &[u8]) { - let mut hash = RollingAdler32::new(); - hash.update_buffer(&bytes); - assert_eq!(hash.hash(), v); - - let r = io::Cursor::new(bytes); - assert_eq!(adler32(r).unwrap(), v); - } - do_test(0x00000001, b""); - do_test(0x00620062, b"a"); - do_test(0x024d0127, b"abc"); - do_test(0x29750586, b"message digest"); - do_test(0x90860b20, b"abcdefghijklmnopqrstuvwxyz"); - do_test(0x8adb150c, b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ - abcdefghijklmnopqrstuvwxyz\ - 0123456789"); - do_test(0x97b61069, b"1234567890123456789012345678901234567890\ - 1234567890123456789012345678901234567890"); - do_test(0xD6251498, &[255; 64000]); - } - - #[test] - fn compare() { - let mut rng = rand::thread_rng(); - let mut data = vec![0u8; 5589]; - for size in [0, 1, 3, 4, 5, 31, 32, 33, 67, - 5550, 5552, 5553, 5568, 5584, 5589].iter().cloned() { - rng.fill(&mut data[..size]); - let r1 = io::Cursor::new(&data[..size]); - let r2 = r1.clone(); - if adler32_slow(r1).unwrap() != adler32(r2).unwrap() { - panic!("Comparison failed, size={}", size); - } - } - } - - #[test] - fn rolling() { - assert_eq!(RollingAdler32::from_value(0x01020304).hash(), 0x01020304); - - fn do_test(a: &[u8], b: 
&[u8]) { - let mut total = Vec::with_capacity(a.len() + b.len()); - total.extend(a); - total.extend(b); - let mut h = RollingAdler32::from_buffer(&total[..(b.len())]); - for i in 0..(a.len()) { - h.remove(b.len(), a[i]); - h.update(total[b.len() + i]); - } - assert_eq!(h.hash(), adler32(b).unwrap()); - } - do_test(b"a", b"b"); - do_test(b"", b"this a test"); - do_test(b"th", b"is a test"); - do_test(b"this a ", b"test"); - } - - #[test] - fn long_window_remove() { - let mut hash = RollingAdler32::new(); - let w = 65536; - assert!(w as u32 > BASE); - - let mut bytes = vec![0; w*3]; - for (i, b) in bytes.iter_mut().enumerate() { - *b = i as u8; - } - - for (i, b) in bytes.iter().enumerate() { - if i >= w { - hash.remove(w, bytes[i - w]); - } - hash.update(*b); - if i > 0 && i % w == 0 { - assert_eq!(hash.hash(), 0x433a8772); - } - } - assert_eq!(hash.hash(), 0xbbba8772); - } -} diff -Nru cargo-0.44.1/vendor/aho-corasick/.cargo-checksum.json cargo-0.47.0/vendor/aho-corasick/.cargo-checksum.json --- cargo-0.44.1/vendor/aho-corasick/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/aho-corasick/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"} \ No newline at end of file +{"files":{},"package":"043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/aho-corasick/Cargo.toml cargo-0.47.0/vendor/aho-corasick/Cargo.toml --- cargo-0.44.1/vendor/aho-corasick/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/aho-corasick/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "aho-corasick" -version = "0.7.10" +version = "0.7.13" authors = ["Andrew Gallant "] exclude = ["/aho-corasick-debug", "/ci/*", "/.travis.yml", "/appveyor.yml"] autotests = false diff -Nru cargo-0.44.1/vendor/aho-corasick/README.md cargo-0.47.0/vendor/aho-corasick/README.md --- cargo-0.44.1/vendor/aho-corasick/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/aho-corasick/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -5,8 +5,8 @@ search principally through an implementation of the [Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), which builds a finite state machine for executing searches in linear time. -Features include case insensitive matching, overlapping matches and search & -replace in streams. +Features include case insensitive matching, overlapping matches, fast searching +via SIMD and optional full DFA construction and search & replace in streams. 
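The README hunk above now advertises case insensitive matching, SIMD-accelerated searching and leftmost-first match semantics. A small sketch of the 0.7 builder API as it appears in the surrounding doc examples; the patterns and haystack below are arbitrary sample data:

    use aho_corasick::{AhoCorasickBuilder, MatchKind};

    fn main() {
        let patterns = &["apple", "maple", "snapple"];
        let haystack = "Nobody likes maple in their apple flavored Snapple.";

        let ac = AhoCorasickBuilder::new()
            .ascii_case_insensitive(true)
            .match_kind(MatchKind::LeftmostFirst)
            .build(patterns);

        for mat in ac.find_iter(haystack) {
            // pattern() is the index of the matching entry in `patterns`.
            println!("pattern {} at {}..{}", mat.pattern(), mat.start(), mat.end());
        }
    }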
[![Build status](https://github.com/BurntSushi/aho-corasick/workflows/ci/badge.svg)](https://github.com/BurntSushi/aho-corasick/actions) [![](http://meritbadge.herokuapp.com/aho-corasick)](https://crates.io/crates/aho-corasick) @@ -103,7 +103,8 @@ let mut wtr = vec![]; let ac = AhoCorasick::new(patterns); -ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with)?; +ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with) + .expect("stream_replace_all failed"); assert_eq!(b"The slow grey sloth.".to_vec(), wtr); ``` diff -Nru cargo-0.44.1/vendor/aho-corasick/src/ahocorasick.rs cargo-0.47.0/vendor/aho-corasick/src/ahocorasick.rs --- cargo-0.44.1/vendor/aho-corasick/src/ahocorasick.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/aho-corasick/src/ahocorasick.rs 2020-10-01 21:38:28.000000000 +0000 @@ -502,7 +502,7 @@ /// The closure accepts three parameters: the match found, the text of /// the match and a string buffer with which to write the replaced text /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns false, then searching is stopped. + /// match. If the closure returns `false`, then searching is stopped. /// /// # Examples /// @@ -524,6 +524,24 @@ /// }); /// assert_eq!("0 the 2 to the 0age", result); /// ``` + /// + /// Stopping the replacement by returning `false` (continued from the + /// example above): + /// + /// ``` + /// # use aho_corasick::{AhoCorasickBuilder, MatchKind}; + /// # let patterns = &["append", "appendage", "app"]; + /// # let haystack = "append the app to the appendage"; + /// # let ac = AhoCorasickBuilder::new() + /// # .match_kind(MatchKind::LeftmostFirst) + /// # .build(patterns); + /// let mut result = String::new(); + /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { + /// dst.push_str(&mat.pattern().to_string()); + /// mat.pattern() != 2 + /// }); + /// assert_eq!("0 the 2 to the appendage", result); + /// ``` pub fn replace_all_with( &self, haystack: &str, @@ -536,7 +554,9 @@ for mat in self.find_iter(haystack) { dst.push_str(&haystack[last_match..mat.start()]); last_match = mat.end(); - replace_with(&mat, &haystack[mat.start()..mat.end()], dst); + if !replace_with(&mat, &haystack[mat.start()..mat.end()], dst) { + break; + }; } dst.push_str(&haystack[last_match..]); } @@ -548,7 +568,7 @@ /// The closure accepts three parameters: the match found, the text of /// the match and a byte buffer with which to write the replaced text /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns false, then searching is stopped. + /// match. If the closure returns `false`, then searching is stopped. 
/// /// # Examples /// @@ -570,6 +590,24 @@ /// }); /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result); /// ``` + /// + /// Stopping the replacement by returning `false` (continued from the + /// example above): + /// + /// ``` + /// # use aho_corasick::{AhoCorasickBuilder, MatchKind}; + /// # let patterns = &["append", "appendage", "app"]; + /// # let haystack = b"append the app to the appendage"; + /// # let ac = AhoCorasickBuilder::new() + /// # .match_kind(MatchKind::LeftmostFirst) + /// # .build(patterns); + /// let mut result = vec![]; + /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { + /// dst.extend(mat.pattern().to_string().bytes()); + /// mat.pattern() != 2 + /// }); + /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result); + /// ``` pub fn replace_all_with_bytes( &self, haystack: &[u8], @@ -582,7 +620,9 @@ for mat in self.find_iter(haystack) { dst.extend(&haystack[last_match..mat.start()]); last_match = mat.end(); - replace_with(&mat, &haystack[mat.start()..mat.end()], dst); + if !replace_with(&mat, &haystack[mat.start()..mat.end()], dst) { + break; + }; } dst.extend(&haystack[last_match..]); } @@ -735,9 +775,7 @@ /// [`find_iter`](struct.AhoCorasick.html#method.find_iter). /// /// The closure accepts three parameters: the match found, the text of - /// the match and the writer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns false, then searching is stopped. + /// the match and the writer with which to write the replaced text (if any). /// /// After all matches are replaced, the writer is _not_ flushed. /// diff -Nru cargo-0.44.1/vendor/aho-corasick/src/lib.rs cargo-0.47.0/vendor/aho-corasick/src/lib.rs --- cargo-0.44.1/vendor/aho-corasick/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/aho-corasick/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -168,13 +168,14 @@ are accelerated using vector instructions such as SIMD. For that reason, this library will internally use a "prefilter" to attempt -to accelerate searches when possible. Currently, this library has fairly -limited implementation that only applies when there are 3 or fewer unique -starting bytes among all patterns in an automaton. - -While a prefilter is generally good to have on by default since it works well -in the common case, it can lead to less predictable or even sub-optimal -performance in some cases. For that reason, prefilters can be disabled via +to accelerate searches when possible. Currently, this library has several +different algorithms it might use depending on the patterns provided. Once the +number of patterns gets too big, prefilters are no longer used. + +While a prefilter is generally good to have on by default since it works +well in the common case, it can lead to less predictable or even sub-optimal +performance in some cases. For that reason, prefilters can be explicitly +disabled via [`AhoCorasickBuilder::prefilter`](struct.AhoCorasickBuilder.html#method.prefilter). 
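The module docs above note that prefilters can now be explicitly disabled through AhoCorasickBuilder::prefilter. A one-call sketch of that switch, with placeholder patterns:

    use aho_corasick::AhoCorasickBuilder;

    fn main() {
        // Trade some raw search speed for more predictable behaviour by
        // turning the prefilter off explicitly.
        let ac = AhoCorasickBuilder::new()
            .prefilter(false)
            .build(&["foo", "bar", "quux"]);
        assert!(ac.is_match("xxx bar xxx"));
    }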
*/ @@ -186,12 +187,12 @@ compile_error!("`std` feature is currently required to build this crate"); extern crate memchr; -#[cfg(test)] -#[macro_use] -extern crate doc_comment; +// #[cfg(doctest)] +// #[macro_use] +// extern crate doc_comment; -#[cfg(test)] -doctest!("../README.md"); +// #[cfg(doctest)] +// doctest!("../README.md"); pub use ahocorasick::{ AhoCorasick, AhoCorasickBuilder, FindIter, FindOverlappingIter, MatchKind, diff -Nru cargo-0.44.1/vendor/anyhow/.cargo-checksum.json cargo-0.47.0/vendor/anyhow/.cargo-checksum.json --- cargo-0.44.1/vendor/anyhow/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/anyhow/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f"} \ No newline at end of file +{"files":{},"package":"6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/anyhow/Cargo.toml cargo-0.47.0/vendor/anyhow/Cargo.toml --- cargo-0.44.1/vendor/anyhow/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/anyhow/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "anyhow" -version = "1.0.31" +version = "1.0.32" authors = ["David Tolnay "] description = "Flexible concrete Error type built on std::error::Error" documentation = "https://docs.rs/anyhow" diff -Nru cargo-0.44.1/vendor/anyhow/src/error.rs cargo-0.47.0/vendor/anyhow/src/error.rs --- cargo-0.44.1/vendor/anyhow/src/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/anyhow/src/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -781,6 +781,12 @@ } } +impl From for Box { + fn from(error: Error) -> Self { + Box::::from(error) + } +} + impl From for Box { fn from(error: Error) -> Self { Box::::from(error) diff -Nru cargo-0.44.1/vendor/anyhow/src/lib.rs cargo-0.47.0/vendor/anyhow/src/lib.rs --- cargo-0.44.1/vendor/anyhow/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/anyhow/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -197,7 +197,7 @@ //! will require an explicit `.map_err(Error::msg)` when working with a //! non-Anyhow error type inside a function that returns Anyhow's error type. -#![doc(html_root_url = "https://docs.rs/anyhow/1.0.31")] +#![doc(html_root_url = "https://docs.rs/anyhow/1.0.32")] #![cfg_attr(backtrace, feature(backtrace))] #![cfg_attr(doc_cfg, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] diff -Nru cargo-0.44.1/vendor/anyhow/tests/test_macros.rs cargo-0.47.0/vendor/anyhow/tests/test_macros.rs --- cargo-0.44.1/vendor/anyhow/tests/test_macros.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/anyhow/tests/test_macros.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,5 @@ +#![allow(clippy::eq_op)] + mod common; use self::common::*; diff -Nru cargo-0.44.1/vendor/anyhow/tests/ui/no-impl.stderr cargo-0.47.0/vendor/anyhow/tests/ui/no-impl.stderr --- cargo-0.44.1/vendor/anyhow/tests/ui/no-impl.stderr 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/anyhow/tests/ui/no-impl.stderr 2020-10-01 21:38:28.000000000 +0000 @@ -4,7 +4,7 @@ 4 | struct Error; | ------------- | | - | doesn't satisfy `Error: anyhow::kind::TraitKind` + | doesn't satisfy `Error: anyhow::private::kind::TraitKind` | doesn't satisfy `Error: std::convert::Into` | doesn't satisfy `Error: std::fmt::Display` ... 
@@ -13,9 +13,9 @@ | = note: the method `anyhow_kind` exists but the following trait bounds were not satisfied: `Error: std::convert::Into` - which is required by `Error: anyhow::kind::TraitKind` + which is required by `Error: anyhow::private::kind::TraitKind` `Error: std::fmt::Display` - which is required by `&Error: anyhow::kind::AdhocKind` + which is required by `&Error: anyhow::private::kind::AdhocKind` `&Error: std::convert::Into` - which is required by `&Error: anyhow::kind::TraitKind` + which is required by `&Error: anyhow::private::kind::TraitKind` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff -Nru cargo-0.44.1/vendor/autocfg/.cargo-checksum.json cargo-0.47.0/vendor/autocfg/.cargo-checksum.json --- cargo-0.44.1/vendor/autocfg/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"} \ No newline at end of file +{"files":{},"package":"cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/autocfg/Cargo.lock cargo-0.47.0/vendor/autocfg/Cargo.lock --- cargo-0.44.1/vendor/autocfg/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -2,5 +2,5 @@ # It is not intended for manual editing. [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" diff -Nru cargo-0.44.1/vendor/autocfg/Cargo.toml cargo-0.47.0/vendor/autocfg/Cargo.toml --- cargo-0.44.1/vendor/autocfg/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,8 +12,9 @@ [package] name = "autocfg" -version = "1.0.0" +version = "1.0.1" authors = ["Josh Stone "] +exclude = ["/.github/**", "/bors.toml"] description = "Automatic cfg for Rust compiler features" readme = "README.md" keywords = ["rustc", "build", "autoconf"] diff -Nru cargo-0.44.1/vendor/autocfg/debian/patches/series cargo-0.47.0/vendor/autocfg/debian/patches/series --- cargo-0.44.1/vendor/autocfg/debian/patches/series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -use-tests-target-dir-envvar.diff diff -Nru cargo-0.44.1/vendor/autocfg/debian/patches/use-tests-target-dir-envvar.diff cargo-0.47.0/vendor/autocfg/debian/patches/use-tests-target-dir-envvar.diff --- cargo-0.44.1/vendor/autocfg/debian/patches/use-tests-target-dir-envvar.diff 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/debian/patches/use-tests-target-dir-envvar.diff 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -Description: Use TESTS_TARGET_DIR envvar to override tests target dir - Rationale: At Debian, the autopkgtests are run inside a non-writable - directory, so the "target" subdirectory can't be used. Inside the - autopkgtest environment, we have an env variable AUTOPKGTEST_TMP - available which points to a temporary directory, that we can pass on to - the tests as TESTS_TARGET_DIR. This makes the tests pass properly inside - this environment. 
-Author: Wolfgang Silbermayr -Forwarded: https://github.com/cuviper/autocfg/pull/22 -Last-Update: 2020-02-18 ---- -This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ ---- a/src/tests.rs -+++ b/src/tests.rs -@@ -1,4 +1,5 @@ - use super::AutoCfg; -+use std::env; - - impl AutoCfg { - fn core_std(&self, path: &str) -> String { -@@ -13,11 +14,18 @@ - fn assert_min(&self, major: usize, minor: usize, probe_result: bool) { - assert_eq!(self.probe_rustc_version(major, minor), probe_result); - } -+ -+ fn for_test() -> Result { -+ match env::var_os("TESTS_TARGET_DIR") { -+ Some(d) => Self::with_dir(d), -+ None => Self::with_dir("target"), -+ } -+ } - } - - #[test] - fn autocfg_version() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - println!("version: {:?}", ac.rustc_version); - assert!(ac.probe_rustc_version(1, 0)); - } -@@ -37,7 +45,7 @@ - - #[test] - fn probe_add() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - let add = ac.core_std("ops::Add"); - let add_rhs = add.clone() + ""; - let add_rhs_output = add.clone() + ""; -@@ -51,7 +59,7 @@ - - #[test] - fn probe_as_ref() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - let as_ref = ac.core_std("convert::AsRef"); - let as_ref_str = as_ref.clone() + ""; - let dyn_as_ref_str = "dyn ".to_string() + &*as_ref_str; -@@ -63,7 +71,7 @@ - - #[test] - fn probe_i128() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - let i128_path = ac.core_std("i128"); - ac.assert_min(1, 26, ac.probe_path(&i128_path)); - ac.assert_min(1, 26, ac.probe_type("i128")); -@@ -71,7 +79,7 @@ - - #[test] - fn probe_sum() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - let sum = ac.core_std("iter::Sum"); - let sum_i32 = sum.clone() + ""; - let dyn_sum_i32 = "dyn ".to_string() + &*sum_i32; -@@ -84,25 +92,25 @@ - - #[test] - fn probe_std() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - ac.assert_std(ac.probe_sysroot_crate("std")); - } - - #[test] - fn probe_alloc() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - ac.assert_min(1, 36, ac.probe_sysroot_crate("alloc")); - } - - #[test] - fn probe_bad_sysroot_crate() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - assert!(!ac.probe_sysroot_crate("doesnt_exist")); - } - - #[test] - fn probe_no_std() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - assert!(ac.probe_type("i32")); - assert!(ac.probe_type("[i32]")); - ac.assert_std(ac.probe_type("Vec")); -@@ -110,7 +118,7 @@ - - #[test] - fn probe_expression() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - assert!(ac.probe_expression(r#""test".trim_left()"#)); - ac.assert_min(1, 30, ac.probe_expression(r#""test".trim_start()"#)); - ac.assert_std(ac.probe_expression("[1, 2, 3].to_vec()")); -@@ -118,7 +126,7 @@ - - #[test] - fn probe_constant() { -- let ac = AutoCfg::with_dir("target").unwrap(); -+ let ac = AutoCfg::for_test().unwrap(); - assert!(ac.probe_constant("1 + 2 + 3")); - ac.assert_min(1, 33, ac.probe_constant("{ let x = 1 + 2 + 3; x * x }")); - ac.assert_min(1, 39, ac.probe_constant(r#""test".len()"#)); diff -Nru cargo-0.44.1/vendor/autocfg/.pc/applied-patches 
cargo-0.47.0/vendor/autocfg/.pc/applied-patches --- cargo-0.44.1/vendor/autocfg/.pc/applied-patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/.pc/applied-patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -use-tests-target-dir-envvar.diff diff -Nru cargo-0.44.1/vendor/autocfg/.pc/.quilt_patches cargo-0.47.0/vendor/autocfg/.pc/.quilt_patches --- cargo-0.44.1/vendor/autocfg/.pc/.quilt_patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/.pc/.quilt_patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -debian/patches diff -Nru cargo-0.44.1/vendor/autocfg/.pc/.quilt_series cargo-0.47.0/vendor/autocfg/.pc/.quilt_series --- cargo-0.44.1/vendor/autocfg/.pc/.quilt_series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/.pc/.quilt_series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -series diff -Nru cargo-0.44.1/vendor/autocfg/.pc/use-tests-target-dir-envvar.diff/src/tests.rs cargo-0.47.0/vendor/autocfg/.pc/use-tests-target-dir-envvar.diff/src/tests.rs --- cargo-0.44.1/vendor/autocfg/.pc/use-tests-target-dir-envvar.diff/src/tests.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/.pc/use-tests-target-dir-envvar.diff/src/tests.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -use super::AutoCfg; - -impl AutoCfg { - fn core_std(&self, path: &str) -> String { - let krate = if self.no_std { "core" } else { "std" }; - format!("{}::{}", krate, path) - } - - fn assert_std(&self, probe_result: bool) { - assert_eq!(!self.no_std, probe_result); - } - - fn assert_min(&self, major: usize, minor: usize, probe_result: bool) { - assert_eq!(self.probe_rustc_version(major, minor), probe_result); - } -} - -#[test] -fn autocfg_version() { - let ac = AutoCfg::with_dir("target").unwrap(); - println!("version: {:?}", ac.rustc_version); - assert!(ac.probe_rustc_version(1, 0)); -} - -#[test] -fn version_cmp() { - use super::version::Version; - let v123 = Version::new(1, 2, 3); - - assert!(Version::new(1, 0, 0) < v123); - assert!(Version::new(1, 2, 2) < v123); - assert!(Version::new(1, 2, 3) == v123); - assert!(Version::new(1, 2, 4) > v123); - assert!(Version::new(1, 10, 0) > v123); - assert!(Version::new(2, 0, 0) > v123); -} - -#[test] -fn probe_add() { - let ac = AutoCfg::with_dir("target").unwrap(); - let add = ac.core_std("ops::Add"); - let add_rhs = add.clone() + ""; - let add_rhs_output = add.clone() + ""; - let dyn_add_rhs_output = "dyn ".to_string() + &*add_rhs_output; - assert!(ac.probe_path(&add)); - assert!(ac.probe_trait(&add)); - assert!(ac.probe_trait(&add_rhs)); - assert!(ac.probe_trait(&add_rhs_output)); - ac.assert_min(1, 27, ac.probe_type(&dyn_add_rhs_output)); -} - -#[test] -fn probe_as_ref() { - let ac = AutoCfg::with_dir("target").unwrap(); - let as_ref = ac.core_std("convert::AsRef"); - let as_ref_str = as_ref.clone() + ""; - let dyn_as_ref_str = "dyn ".to_string() + &*as_ref_str; - assert!(ac.probe_path(&as_ref)); - assert!(ac.probe_trait(&as_ref_str)); - assert!(ac.probe_type(&as_ref_str)); - ac.assert_min(1, 27, ac.probe_type(&dyn_as_ref_str)); -} - -#[test] -fn probe_i128() { - let ac = AutoCfg::with_dir("target").unwrap(); - let i128_path = ac.core_std("i128"); - ac.assert_min(1, 26, ac.probe_path(&i128_path)); - ac.assert_min(1, 26, ac.probe_type("i128")); -} - -#[test] -fn probe_sum() { - let ac = AutoCfg::with_dir("target").unwrap(); - let sum = ac.core_std("iter::Sum"); - let sum_i32 = sum.clone() + ""; - let dyn_sum_i32 = "dyn ".to_string() + &*sum_i32; - ac.assert_min(1, 12, 
ac.probe_path(&sum)); - ac.assert_min(1, 12, ac.probe_trait(&sum)); - ac.assert_min(1, 12, ac.probe_trait(&sum_i32)); - ac.assert_min(1, 12, ac.probe_type(&sum_i32)); - ac.assert_min(1, 27, ac.probe_type(&dyn_sum_i32)); -} - -#[test] -fn probe_std() { - let ac = AutoCfg::with_dir("target").unwrap(); - ac.assert_std(ac.probe_sysroot_crate("std")); -} - -#[test] -fn probe_alloc() { - let ac = AutoCfg::with_dir("target").unwrap(); - ac.assert_min(1, 36, ac.probe_sysroot_crate("alloc")); -} - -#[test] -fn probe_bad_sysroot_crate() { - let ac = AutoCfg::with_dir("target").unwrap(); - assert!(!ac.probe_sysroot_crate("doesnt_exist")); -} - -#[test] -fn probe_no_std() { - let ac = AutoCfg::with_dir("target").unwrap(); - assert!(ac.probe_type("i32")); - assert!(ac.probe_type("[i32]")); - ac.assert_std(ac.probe_type("Vec")); -} - -#[test] -fn probe_expression() { - let ac = AutoCfg::with_dir("target").unwrap(); - assert!(ac.probe_expression(r#""test".trim_left()"#)); - ac.assert_min(1, 30, ac.probe_expression(r#""test".trim_start()"#)); - ac.assert_std(ac.probe_expression("[1, 2, 3].to_vec()")); -} - -#[test] -fn probe_constant() { - let ac = AutoCfg::with_dir("target").unwrap(); - assert!(ac.probe_constant("1 + 2 + 3")); - ac.assert_min(1, 33, ac.probe_constant("{ let x = 1 + 2 + 3; x * x }")); - ac.assert_min(1, 39, ac.probe_constant(r#""test".len()"#)); -} diff -Nru cargo-0.44.1/vendor/autocfg/.pc/.version cargo-0.47.0/vendor/autocfg/.pc/.version --- cargo-0.44.1/vendor/autocfg/.pc/.version 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/.pc/.version 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2 diff -Nru cargo-0.44.1/vendor/autocfg/README.md cargo-0.47.0/vendor/autocfg/README.md --- cargo-0.44.1/vendor/autocfg/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -4,7 +4,7 @@ [![autocfg crate](https://img.shields.io/crates/v/autocfg.svg)](https://crates.io/crates/autocfg) [![autocfg documentation](https://docs.rs/autocfg/badge.svg)](https://docs.rs/autocfg) ![minimum rustc 1.0](https://img.shields.io/badge/rustc-1.0+-red.svg) -[![Travis Status](https://travis-ci.org/cuviper/autocfg.svg?branch=master)](https://travis-ci.org/cuviper/autocfg) +![build status](https://github.com/cuviper/autocfg/workflows/master/badge.svg) A Rust library for build scripts to automatically configure code based on compiler support. Code snippets are dynamically tested to see if the `rustc` @@ -43,6 +43,9 @@ ## Release Notes +- 1.0.1 (2020-08-20) + - Apply `RUSTFLAGS` for more `--target` scenarios, by @adamreichold. + - 1.0.0 (2020-01-08) - 🎉 Release 1.0! 🎉 (no breaking changes) - Add `probe_expression` and `emit_expression_cfg` to test arbitrary expressions. diff -Nru cargo-0.44.1/vendor/autocfg/src/lib.rs cargo-0.47.0/vendor/autocfg/src/lib.rs --- cargo-0.44.1/vendor/autocfg/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -157,6 +157,8 @@ let rustc: PathBuf = rustc.into(); let rustc_version = try!(Version::from_rustc(&rustc)); + let target = env::var_os("TARGET"); + // Sanity check the output directory let dir = dir.into(); let meta = try!(fs::metadata(&dir).map_err(error::from_io)); @@ -170,7 +172,9 @@ // so for now we only apply RUSTFLAGS when cross-compiling an artifact. // // See https://github.com/cuviper/autocfg/pull/10#issuecomment-527575030. 
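The autocfg probes exercised above (probe_type, probe_expression, probe_constant) are meant to be driven from a build script. A minimal build.rs sketch of that pattern; the has_i128 cfg name is only an example:

    // build.rs, with `autocfg = "1"` under [build-dependencies]
    fn main() {
        let ac = autocfg::new();
        // Emit `--cfg has_i128` only when the probed rustc accepts the type.
        if ac.probe_type("i128") {
            autocfg::emit("has_i128");
        }
    }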
- let rustflags = if env::var_os("TARGET") != env::var_os("HOST") { + let rustflags = if target != env::var_os("HOST") + || dir_contains_target(&target, &dir, env::var_os("CARGO_TARGET_DIR")) + { env::var("RUSTFLAGS").ok().map(|rustflags| { // This is meant to match how cargo handles the RUSTFLAG environment // variable. @@ -190,7 +194,7 @@ out_dir: dir, rustc: rustc, rustc_version: rustc_version, - target: env::var_os("TARGET"), + target: target, no_std: false, rustflags: rustflags, }; @@ -410,3 +414,25 @@ }) .collect() } + +fn dir_contains_target( + target: &Option, + dir: &PathBuf, + cargo_target_dir: Option, +) -> bool { + target + .as_ref() + .and_then(|target| { + dir.to_str().and_then(|dir| { + let mut cargo_target_dir = cargo_target_dir + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from("target")); + cargo_target_dir.push(target); + + cargo_target_dir + .to_str() + .map(|cargo_target_dir| dir.contains(&cargo_target_dir)) + }) + }) + .unwrap_or(false) +} diff -Nru cargo-0.44.1/vendor/autocfg/src/tests.rs cargo-0.47.0/vendor/autocfg/src/tests.rs --- cargo-0.44.1/vendor/autocfg/src/tests.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/autocfg/src/tests.rs 2020-10-01 21:38:28.000000000 +0000 @@ -131,3 +131,39 @@ ac.assert_min(1, 33, ac.probe_constant("{ let x = 1 + 2 + 3; x * x }")); ac.assert_min(1, 39, ac.probe_constant(r#""test".len()"#)); } + +#[test] +fn dir_does_not_contain_target() { + assert!(!super::dir_contains_target( + &Some("x86_64-unknown-linux-gnu".into()), + &"/project/target/debug/build/project-ea75983148559682/out".into(), + None, + )); +} + +#[test] +fn dir_does_contain_target() { + assert!(super::dir_contains_target( + &Some("x86_64-unknown-linux-gnu".into()), + &"/project/target/x86_64-unknown-linux-gnu/debug/build/project-0147aca016480b9d/out".into(), + None, + )); +} + +#[test] +fn dir_does_not_contain_target_with_custom_target_dir() { + assert!(!super::dir_contains_target( + &Some("x86_64-unknown-linux-gnu".into()), + &"/project/custom/debug/build/project-ea75983148559682/out".into(), + Some("custom".into()), + )); +} + +#[test] +fn dir_does_contain_target_with_custom_target_dir() { + assert!(super::dir_contains_target( + &Some("x86_64-unknown-linux-gnu".into()), + &"/project/custom/x86_64-unknown-linux-gnu/debug/build/project-0147aca016480b9d/out".into(), + Some("custom".into()), + )); +} diff -Nru cargo-0.44.1/vendor/cc/.cargo-checksum.json cargo-0.47.0/vendor/cc/.cargo-checksum.json --- cargo-0.44.1/vendor/cc/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311"} \ No newline at end of file +{"files":{},"package":"ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/cc/Cargo.lock cargo-0.47.0/vendor/cc/Cargo.lock --- cargo-0.44.1/vendor/cc/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -1,156 +1,145 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] name = "cc" -version = "1.0.54" +version = "1.0.60" dependencies = [ - "jobserver 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", - "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver", + "tempfile", ] [[package]] name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "getrandom" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "wasi", ] [[package]] name = "jobserver" -version = "0.1.18" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "libc" -version = "0.2.66" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235" [[package]] name = "ppv-lite86" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" [[package]] name = "rand" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ - "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", ] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core 
0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core", ] [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "tempfile" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", ] [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -"checksum jobserver 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "230ae9adf468173aecd4176c7233bddc84a15871a586c5971ace9a55f881c075" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412" -"checksum rand_chacha 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" -"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff -Nru cargo-0.44.1/vendor/cc/Cargo.toml cargo-0.47.0/vendor/cc/Cargo.toml --- cargo-0.44.1/vendor/cc/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "cc" -version = "1.0.54" +version = "1.0.60" authors = ["Alex Crichton "] exclude = ["/.travis.yml", "/appveyor.yml"] description = "A build-time dependency for Cargo build scripts to assist in invoking the native\nC compiler to compile native C code into a static archive to be linked into Rust\ncode.\n" diff -Nru cargo-0.44.1/vendor/cc/.pc/.quilt_patches cargo-0.47.0/vendor/cc/.pc/.quilt_patches --- cargo-0.44.1/vendor/cc/.pc/.quilt_patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/.pc/.quilt_patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -debian/patches diff -Nru cargo-0.44.1/vendor/cc/.pc/.quilt_series cargo-0.47.0/vendor/cc/.pc/.quilt_series --- cargo-0.44.1/vendor/cc/.pc/.quilt_series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/.pc/.quilt_series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -series diff -Nru cargo-0.44.1/vendor/cc/.pc/.version cargo-0.47.0/vendor/cc/.pc/.version --- cargo-0.44.1/vendor/cc/.pc/.version 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/.pc/.version 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2 diff -Nru cargo-0.44.1/vendor/cc/README.md cargo-0.47.0/vendor/cc/README.md --- cargo-0.44.1/vendor/cc/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -8,9 +8,6 @@ order to build a set of C/C++ files into a static archive. This crate calls out to the most relevant compiler for a platform, for example using `cl` on MSVC. 
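As the README text above says, cc drives the platform's native C/C++ compiler from a build script. A minimal build.rs sketch of that flow; the source file and library names are placeholders:

    // build.rs, with `cc = "1.0"` under [build-dependencies]
    fn main() {
        // Compile src/foo.c into a static archive (libfoo.a / foo.lib) and
        // tell Cargo to link it into the crate.
        cc::Build::new()
            .file("src/foo.c")
            .compile("foo");
    }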
-> **Note**: this crate was recently renamed from the `gcc` crate, so if you're -> looking for the `gcc` crate you're in the right spot! - ## Using cc-rs First, you'll want to both add a build script for your crate (`build.rs`) and @@ -39,7 +36,7 @@ named libfoo.a. You can call the functions in Rust by declaring functions in your Rust code like so: -``` +```rust,no_run extern { fn foo_function(); fn bar_function(); @@ -150,6 +147,7 @@ When using C++ library compilation switch, the `CXX` and `CXXFLAGS` env variables are used instead of `CC` and `CFLAGS` and the C++ standard library is linked to the crate target. +Remember that C++ does name mangling so `extern "C"` might be required to enable rust linker to find your functions. ## CUDA C++ support diff -Nru cargo-0.44.1/vendor/cc/src/lib.rs cargo-0.47.0/vendor/cc/src/lib.rs --- cargo-0.44.1/vendor/cc/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -120,6 +120,7 @@ warnings: Option, extra_warnings: Option, env_cache: Arc>>>, + apple_sdk_root_cache: Arc>>, } /// Represents the types of errors that may occur while using cc-rs. @@ -312,6 +313,7 @@ extra_warnings: None, warnings_into_errors: false, env_cache: Arc::new(Mutex::new(HashMap::new())), + apple_sdk_root_cache: Arc::new(Mutex::new(HashMap::new())), } } @@ -414,7 +416,7 @@ /// For a convenience method for setting flags conditionally, /// see `flag_if_supported()`. /// - /// It may return error if it's unable to run the compilier with a test file + /// It may return error if it's unable to run the compiler with a test file /// (e.g. the compiler is missing or a write to the `out_dir` failed). /// /// Note: Once computed, the result of this call is stored in the @@ -968,7 +970,7 @@ /// Run the compiler, generating the file `output` /// /// The name `output` should be the name of the library. For backwards compatibility, - /// the `output` may start with `lib` and end with `.a`. The Rust compilier will create + /// the `output` may start with `lib` and end with `.a`. The Rust compiler will create /// the assembly with the lib prefix and .a extension. MSVC will create a file without prefix, /// ending with `.lib`. /// @@ -1172,6 +1174,9 @@ cmd.arg("-c"); } cmd.arg(&obj.src); + if cfg!(target_os = "macos") { + self.fix_env_for_apple_os(&mut cmd)?; + } run(&mut cmd, &name)?; Ok(()) @@ -1418,20 +1423,29 @@ if !(target.contains("android") && android_clang_compiler_uses_target_arg_internally(&cmd.path)) { - cmd.args.push(format!("--target={}", target).into()); + if target.contains("darwin") { + if let Some(arch) = + map_darwin_target_from_rust_to_compiler_architecture(target) + { + cmd.args + .push(format!("--target={}-apple-darwin", arch).into()); + } + } else { + cmd.args.push(format!("--target={}", target).into()); + } } } ToolFamily::Msvc { clang_cl } => { // This is an undocumented flag from MSVC but helps with making // builds more reproducible by avoiding putting timestamps into // files. 
- cmd.args.push("-Brepro".into()); + cmd.push_cc_arg("-Brepro".into()); if clang_cl { if target.contains("x86_64") { - cmd.args.push("-m64".into()); + cmd.push_cc_arg("-m64".into()); } else if target.contains("86") { - cmd.args.push("-m32".into()); + cmd.push_cc_arg("-m32".into()); cmd.push_cc_arg("-arch:IA32".into()); } else { cmd.push_cc_arg(format!("--target={}", target).into()); @@ -1465,6 +1479,14 @@ cmd.args.push("-m64".into()); } + if target.contains("darwin") { + if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target) + { + cmd.args.push("-arch".into()); + cmd.args.push(arch.into()); + } + } + if self.static_flag.is_none() { let features = self .getenv("CARGO_CFG_TARGET_FEATURE") @@ -1687,14 +1709,19 @@ "ml.exe" }; let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| self.cmd(tool)); + cmd.arg("-nologo"); // undocumented, yet working with armasm[64] for directory in self.include_directories.iter() { cmd.arg("-I").arg(directory); } - for &(ref key, ref value) in self.definitions.iter() { - if let Some(ref value) = *value { - cmd.arg(&format!("-D{}={}", key, value)); - } else { - cmd.arg(&format!("-D{}", key)); + if target.contains("aarch64") || target.contains("arm") { + println!("cargo:warning=The MSVC ARM assemblers do not support -D flags"); + } else { + for &(ref key, ref value) in self.definitions.iter() { + if let Some(ref value) = *value { + cmd.arg(&format!("-D{}={}", key, value)); + } else { + cmd.arg(&format!("-D{}", key)); + } } } @@ -1837,6 +1864,7 @@ let arch = match arch { "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"), "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"), + "arm64e" => ArchSpec::Device("arm64e"), "arm64" | "aarch64" => ArchSpec::Device("arm64"), "i386" | "i686" => ArchSpec::Simulator("-m32"), "x86_64" => ArchSpec::Simulator("-m64"), @@ -1868,27 +1896,9 @@ }; self.print(&format!("Detecting iOS SDK path for {}", sdk)); - let sdk_path = self - .cmd("xcrun") - .arg("--show-sdk-path") - .arg("--sdk") - .arg(sdk) - .stderr(Stdio::inherit()) - .output()? - .stdout; - - let sdk_path = match String::from_utf8(sdk_path) { - Ok(p) => p, - Err(_) => { - return Err(Error::new( - ErrorKind::IOError, - "Unable to determine iOS SDK path.", - )); - } - }; - + let sdk_path = self.apple_sdk_root(sdk)?; cmd.args.push("-isysroot".into()); - cmd.args.push(sdk_path.trim().into()); + cmd.args.push(sdk_path); cmd.args.push("-fembed-bitcode".into()); /* * TODO we probably ultimately want the -fembed-bitcode-marker flag @@ -1996,7 +2006,11 @@ { "clang".to_string() } else if target.contains("vxworks") { - "wr-c++".to_string() + if self.cpp { + "wr-c++".to_string() + } else { + "wr-cc".to_string() + } } else if self.get_host()? 
!= target { let prefix = self.prefix_for_target(&target); match prefix { @@ -2263,6 +2277,7 @@ "arm-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "arm-unknown-netbsd-eabi" => Some("arm--netbsdelf-eabi"), "armv6-unknown-netbsd-eabihf" => Some("armv6--netbsdelf-eabihf"), + "armv7-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), "armv7-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "armv7-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "armv7neon-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), @@ -2272,6 +2287,7 @@ "thumbv7neon-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), "thumbv7neon-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), "armv7-unknown-netbsd-eabihf" => Some("armv7--netbsdelf-eabihf"), + "hexagon-unknown-linux-musl" => Some("hexagon-linux-musl"), "i586-unknown-linux-musl" => Some("musl"), "i686-pc-windows-gnu" => Some("i686-w64-mingw32"), "i686-uwp-windows-gnu" => Some("i686-w64-mingw32"), @@ -2290,11 +2306,31 @@ "powerpc-unknown-netbsd" => Some("powerpc--netbsd"), "powerpc64-unknown-linux-gnu" => Some("powerpc-linux-gnu"), "powerpc64le-unknown-linux-gnu" => Some("powerpc64le-linux-gnu"), - "riscv32i-unknown-none-elf" => Some("riscv32-unknown-elf"), - "riscv32imac-unknown-none-elf" => Some("riscv32-unknown-elf"), - "riscv32imc-unknown-none-elf" => Some("riscv32-unknown-elf"), - "riscv64gc-unknown-none-elf" => Some("riscv64-unknown-elf"), - "riscv64imac-unknown-none-elf" => Some("riscv64-unknown-elf"), + "riscv32i-unknown-none-elf" => self.find_working_gnu_prefix(&[ + "riscv32-unknown-elf", + "riscv64-unknown-elf", + "riscv-none-embed", + ]), + "riscv32imac-unknown-none-elf" => self.find_working_gnu_prefix(&[ + "riscv32-unknown-elf", + "riscv64-unknown-elf", + "riscv-none-embed", + ]), + "riscv32imc-unknown-none-elf" => self.find_working_gnu_prefix(&[ + "riscv32-unknown-elf", + "riscv64-unknown-elf", + "riscv-none-embed", + ]), + "riscv64gc-unknown-none-elf" => self.find_working_gnu_prefix(&[ + "riscv64-unknown-elf", + "riscv32-unknown-elf", + "riscv-none-embed", + ]), + "riscv64imac-unknown-none-elf" => self.find_working_gnu_prefix(&[ + "riscv64-unknown-elf", + "riscv32-unknown-elf", + "riscv-none-embed", + ]), "riscv64gc-unknown-linux-gnu" => Some("riscv64-linux-gnu"), "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"), "sparc-unknown-linux-gnu" => Some("sparc-linux-gnu"), @@ -2324,6 +2360,38 @@ .map(|x| x.to_owned())) } + /// Some platforms have multiple, compatible, canonical prefixes. Look through + /// each possible prefix for a compiler that exists and return it. The prefixes + /// should be ordered from most-likely to least-likely. + fn find_working_gnu_prefix(&self, prefixes: &[&'static str]) -> Option<&'static str> { + let suffix = if self.cpp { "-g++" } else { "-gcc" }; + let extension = std::env::consts::EXE_SUFFIX; + + // Loop through PATH entries searching for each toolchain. This ensures that we + // are more likely to discover the toolchain early on, because chances are good + // that the desired toolchain is in one of the higher-priority paths. + env::var_os("PATH") + .as_ref() + .and_then(|path_entries| { + env::split_paths(path_entries).find_map(|path_entry| { + for prefix in prefixes { + let target_compiler = format!("{}{}{}", prefix, suffix, extension); + if path_entry.join(&target_compiler).exists() { + return Some(prefix); + } + } + None + }) + }) + .map(|prefix| *prefix) + .or_else(|| + // If no toolchain was found, provide the first toolchain that was passed in. 
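For the bare-metal RISC-V targets above, the candidate prefixes are probed in order against `PATH`. When none of the probed toolchains is installed, or an unusually named one is wanted, a build script can still name the cross compiler explicitly instead of relying on the probing. A minimal sketch, assuming a hypothetical `riscv-none-embed-gcc` installed outside `PATH` (the path and file names are illustrative, not taken from this diff):

```rust
// build.rs — explicit compiler selection as a fallback to the automatic
// prefix probing shown above.
fn main() {
    let target = std::env::var("TARGET").unwrap();

    let mut build = cc::Build::new();
    build.file("src/startup.c"); // hypothetical C source

    if target.starts_with("riscv32") {
        // Name the cross compiler directly instead of relying on PATH probing.
        build.compiler("/opt/riscv/bin/riscv-none-embed-gcc");
    }

    build.compile("startup");
}
```

Setting the `CC` environment variable, as documented in the cc-rs README, achieves the same thing without editing the build script.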
+ // This toolchain has been shown not to exist, however it will appear in the + // error that is shown to the user which should make it easier to search for + // where it should be obtained. + prefixes.first().map(|prefix| *prefix)) + } + fn get_target(&self) -> Result { match self.target.clone() { Some(t) => Ok(t), @@ -2394,6 +2462,63 @@ println!("{}", s); } } + + fn fix_env_for_apple_os(&self, cmd: &mut Command) -> Result<(), Error> { + let target = self.get_target()?; + let host = self.get_host()?; + if host.contains("apple-darwin") && target.contains("apple-darwin") { + // If, for example, `cargo` runs during the build of an XCode project, then `SDKROOT` environment variable + // would represent the current target, and this is the problem for us, if we want to compile something + // for the host, when host != target. + // We can not just remove `SDKROOT`, because, again, for example, XCode add to PATH + // /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin + // and `cc` from this path can not find system include files, like `pthread.h`, if `SDKROOT` + // is not set + if let Ok(sdkroot) = env::var("SDKROOT") { + if !sdkroot.contains("MacOSX") { + let macos_sdk = self.apple_sdk_root("macosx")?; + cmd.env("SDKROOT", macos_sdk); + } + } + // Additionally, `IPHONEOS_DEPLOYMENT_TARGET` must not be set when using the Xcode linker at + // "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld", + // although this is apparently ignored when using the linker at "/usr/bin/ld". + cmd.env_remove("IPHONEOS_DEPLOYMENT_TARGET"); + } + Ok(()) + } + + fn apple_sdk_root(&self, sdk: &str) -> Result { + let mut cache = self + .apple_sdk_root_cache + .lock() + .expect("apple_sdk_root_cache lock failed"); + if let Some(ret) = cache.get(sdk) { + return Ok(ret.clone()); + } + + let sdk_path = self + .cmd("xcrun") + .arg("--show-sdk-path") + .arg("--sdk") + .arg(sdk) + .stderr(Stdio::inherit()) + .output()? 
+ .stdout; + + let sdk_path = match String::from_utf8(sdk_path) { + Ok(p) => p, + Err(_) => { + return Err(Error::new( + ErrorKind::IOError, + "Unable to determine iOS SDK path.", + )); + } + }; + let ret: OsString = sdk_path.trim().into(); + cache.insert(sdk.into(), ret.clone()); + Ok(ret) + } } impl Default for Build { @@ -2766,10 +2891,33 @@ // So to construct proper command line check if // `--target` argument would be passed or not to clang fn android_clang_compiler_uses_target_arg_internally(clang_path: &Path) -> bool { - NEW_STANDALONE_ANDROID_COMPILERS.iter().any(|x| { - let x: &OsStr = x.as_ref(); - x == clang_path.as_os_str() - }) + if let Some(filename) = clang_path.file_name() { + if let Some(filename_str) = filename.to_str() { + filename_str.contains("android") + } else { + false + } + } else { + false + } +} + +#[test] +fn test_android_clang_compiler_uses_target_arg_internally() { + for version in 16..21 { + assert!(android_clang_compiler_uses_target_arg_internally( + &PathBuf::from(format!("armv7a-linux-androideabi{}-clang", version)) + )); + assert!(android_clang_compiler_uses_target_arg_internally( + &PathBuf::from(format!("armv7a-linux-androideabi{}-clang++", version)) + )); + } + assert!(!android_clang_compiler_uses_target_arg_internally( + &PathBuf::from("clang") + )); + assert!(!android_clang_compiler_uses_target_arg_internally( + &PathBuf::from("clang++") + )); } fn autodetect_android_compiler(target: &str, host: &str, gnu: &str, clang: &str) -> String { @@ -2818,3 +2966,16 @@ clang_compiler } } + +// Rust and clang/cc don't agree on how to name the target. +fn map_darwin_target_from_rust_to_compiler_architecture(target: &str) -> Option<&'static str> { + if target.contains("x86_64") { + Some("x86_64") + } else if target.contains("arm64e") { + Some("arm64e") + } else if target.contains("aarch64") { + Some("arm64") + } else { + None + } +} diff -Nru cargo-0.44.1/vendor/cc/src/windows_registry.rs cargo-0.47.0/vendor/cc/src/windows_registry.rs --- cargo-0.44.1/vendor/cc/src/windows_registry.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/cc/src/windows_registry.rs 2020-10-01 21:38:28.000000000 +0000 @@ -347,6 +347,7 @@ }; let mut tool = MsvcTool::new(tool_path); + tool.path.push(bin_path.clone()); tool.path.push(host_dylib_path); tool.libs.push(lib_path); tool.include.push(include_path); @@ -375,6 +376,9 @@ let host = match host_arch() { X86 => "X86", X86_64 => "X64", + // There is no natively hosted compiler on ARM64. + // Instead, use the x86 toolchain under emulation (there is no x64 emulation). 
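The comment closing the hunk above is worth spelling out: at the time of this change there were no ARM64-hosted MSVC compilers, and Windows on ARM64 emulates x86 but not x64 processes, so the x86-hosted cross tools are the only workable choice. A small restatement of that decision as code, purely for clarity (the function and its name are illustrative, not part of the crate's API):

```rust
/// Illustrative helper, not part of cc's API: choose which host flavour of
/// the MSVC tools to run, given the architecture of the build machine.
fn msvc_host_tools(host_arch: &str) -> Option<&'static str> {
    match host_arch {
        "x86" => Some("X86"),
        "x86_64" => Some("X64"),
        // No ARM64-hosted MSVC tools exist, and ARM64 Windows emulates x86
        // but not x64, so fall back to the x86-hosted cross compilers.
        "aarch64" => Some("X86"),
        _ => None,
    }
}
```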
+ AARCH64 => "X86", _ => return None, }; let target = lib_subdir(target)?; @@ -421,8 +425,15 @@ let sub = lib_subdir(target)?; let (ucrt, ucrt_version) = get_ucrt_dir()?; + let host = match host_arch() { + X86 => "x86", + X86_64 => "x64", + AARCH64 => "arm64", + _ => return None, + }; + tool.path - .push(ucrt.join("bin").join(&ucrt_version).join(sub)); + .push(ucrt.join("bin").join(&ucrt_version).join(host)); let ucrt_include = ucrt.join("include").join(&ucrt_version); tool.include.push(ucrt_include.join("ucrt")); @@ -431,7 +442,7 @@ tool.libs.push(ucrt_lib.join("ucrt").join(sub)); if let Some((sdk, version)) = get_sdk10_dir() { - tool.path.push(sdk.join("bin").join(sub)); + tool.path.push(sdk.join("bin").join(host)); let sdk_lib = sdk.join("lib").join(&version); tool.libs.push(sdk_lib.join("um").join(sub)); let sdk_include = sdk.join("include").join(&version); @@ -440,7 +451,7 @@ tool.include.push(sdk_include.join("winrt")); tool.include.push(sdk_include.join("shared")); } else if let Some(sdk) = get_sdk81_dir() { - tool.path.push(sdk.join("bin").join(sub)); + tool.path.push(sdk.join("bin").join(host)); let sdk_lib = sdk.join("lib").join("winv6.3"); tool.libs.push(sdk_lib.join("um").join(sub)); let sdk_include = sdk.join("include"); @@ -607,8 +618,10 @@ const PROCESSOR_ARCHITECTURE_INTEL: u16 = 0; const PROCESSOR_ARCHITECTURE_AMD64: u16 = 9; + const PROCESSOR_ARCHITECTURE_ARM64: u16 = 12; const X86: u16 = PROCESSOR_ARCHITECTURE_INTEL; const X86_64: u16 = PROCESSOR_ARCHITECTURE_AMD64; + const AARCH64: u16 = PROCESSOR_ARCHITECTURE_ARM64; // When choosing the tool to use, we have to choose the one which matches // the target architecture. Otherwise we end up in situations where someone diff -Nru cargo-0.44.1/vendor/clap/.cargo-checksum.json cargo-0.47.0/vendor/clap/.cargo-checksum.json --- cargo-0.44.1/vendor/clap/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129"} \ No newline at end of file +{"files":{},"package":"37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/clap/Cargo.toml cargo-0.47.0/vendor/clap/Cargo.toml --- cargo-0.44.1/vendor/clap/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "clap" -version = "2.33.1" +version = "2.33.3" authors = ["Kevin K. "] exclude = ["examples/*", "clap-test/*", "tests/*", "benches/*", "*.png", "clap-perf/*", "*.dot"] description = "A simple to use, efficient, and full-featured Command Line Argument Parser\n" diff -Nru cargo-0.44.1/vendor/clap/CHANGELOG.md cargo-0.47.0/vendor/clap/CHANGELOG.md --- cargo-0.44.1/vendor/clap/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,17 @@ + +### v2.33.3 (2020-08-13) + +#### Improvements + +* Suppress deprecation warnings when using `crate_*` macros. + + +### v2.33.2 (2020-08-5) + +#### Documentation + +* Fixed links to `2.x` examples. Now they point to the right place. 
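The `crate_*` macros named in the changelog entry above expand to values read from the consuming crate's `Cargo.toml` (via the `CARGO_PKG_*` environment variables) at compile time; the 2.33.3 change only silences the deprecated `ONCE_INIT` usage inside the `crate_authors!` expansion, which is visible later in this diff. Typical usage, sketched for a hypothetical binary (the application itself is illustrative, not taken from this diff):

```rust
use clap::{crate_authors, crate_description, crate_name, crate_version, App};

fn main() {
    // Each macro pulls the corresponding field from the caller's Cargo.toml
    // at compile time, so version/author strings never go stale.
    let matches = App::new(crate_name!())
        .version(crate_version!())
        .author(crate_authors!(", "))
        .about(crate_description!())
        .get_matches();

    let _ = matches;
}
```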
+ ### v2.33.1 (2020-05-11) diff -Nru cargo-0.44.1/vendor/clap/.pc/no-clippy.patch/Cargo.toml cargo-0.47.0/vendor/clap/.pc/no-clippy.patch/Cargo.toml --- cargo-0.44.1/vendor/clap/.pc/no-clippy.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/.pc/no-clippy.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "clap" -version = "2.33.1" +version = "2.33.3" authors = ["Kevin K. "] exclude = ["examples/*", "clap-test/*", "tests/*", "benches/*", "*.png", "clap-perf/*", "*.dot"] description = "A simple to use, efficient, and full-featured Command Line Argument Parser\n" diff -Nru cargo-0.44.1/vendor/clap/.pc/relax-dep-versions.patch/Cargo.toml cargo-0.47.0/vendor/clap/.pc/relax-dep-versions.patch/Cargo.toml --- cargo-0.44.1/vendor/clap/.pc/relax-dep-versions.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/.pc/relax-dep-versions.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "clap" -version = "2.33.1" +version = "2.33.3" authors = ["Kevin K. "] exclude = ["examples/*", "clap-test/*", "tests/*", "benches/*", "*.png", "clap-perf/*", "*.dot"] description = "A simple to use, efficient, and full-featured Command Line Argument Parser\n" diff -Nru cargo-0.44.1/vendor/clap/src/app/mod.rs cargo-0.47.0/vendor/clap/src/app/mod.rs --- cargo-0.44.1/vendor/clap/src/app/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/src/app/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -156,8 +156,8 @@ /// # } /// ``` /// [`App`]: ./struct.App.html - /// [`examples/17_yaml.rs`]: https://github.com/clap-rs/clap/blob/master/examples/17_yaml.rs - /// [`examples/17_yaml.yml`]: https://github.com/clap-rs/clap/blob/master/examples/17_yaml.yml + /// [`examples/17_yaml.rs`]: https://github.com/clap-rs/clap/blob/v2.33.1/examples/17_yaml.rs + /// [`examples/17_yaml.yml`]: https://github.com/clap-rs/clap/blob/v2.33.1/examples/17_yaml.yml /// [`panic!`]: https://doc.rust-lang.org/std/macro.panic!.html #[cfg(feature = "yaml")] pub fn from_yaml(yaml: &'a Yaml) -> App<'a, 'a> { @@ -183,7 +183,7 @@ /// # ; /// ``` /// [`crate_authors!`]: ./macro.crate_authors!.html - /// [`examples/`]: https://github.com/clap-rs/clap/tree/master/examples + /// [`examples/`]: https://github.com/clap-rs/clap/tree/v2.33.1/examples pub fn author>(mut self, author: S) -> Self { self.p.meta.author = Some(author.into()); self @@ -341,7 +341,7 @@ /// # ; /// ``` /// [`crate_version!`]: ./macro.crate_version!.html - /// [`examples/`]: https://github.com/clap-rs/clap/tree/master/examples + /// [`examples/`]: https://github.com/clap-rs/clap/tree/v2.33.1/examples /// [`App::long_version`]: ./struct.App.html#method.long_version pub fn version>(mut self, ver: S) -> Self { self.p.meta.version = Some(ver.into()); @@ -372,7 +372,7 @@ /// # ; /// ``` /// [`crate_version!`]: ./macro.crate_version!.html - /// [`examples/`]: https://github.com/clap-rs/clap/tree/master/examples + /// [`examples/`]: https://github.com/clap-rs/clap/tree/v2.33.1/examples /// [`App::version`]: ./struct.App.html#method.version pub fn long_version>(mut self, ver: S) -> Self { self.p.meta.long_version = Some(ver.into()); diff -Nru cargo-0.44.1/vendor/clap/src/lib.rs cargo-0.47.0/vendor/clap/src/lib.rs --- cargo-0.44.1/vendor/clap/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright ⓒ 2015-2016 Kevin B. 
Knapp and [`clap-rs` contributors](https://github.com/clap-rs/clap/blob/master/CONTRIBUTORS.md). +// Copyright ⓒ 2015-2016 Kevin B. Knapp and [`clap-rs` contributors](https://github.com/clap-rs/clap/blob/v2.33.1/CONTRIBUTORS.md). // Licensed under the MIT license // (see LICENSE or ) All files in the project carrying such // notice may not be copied, modified, or distributed except according to those terms. @@ -366,7 +366,7 @@ //! * **Red** Color: **NOT** included by default (must use cargo `features` to enable) //! * **Blue** Color: Dev dependency, only used while developing. //! -//! ![clap dependencies](https://raw.githubusercontent.com/clap-rs/clap/master/clap_dep_graph.png) +//! ![clap dependencies](https://github.com/clap-rs/clap/blob/v2.33.1/clap_dep_graph.png) //! //! ### More Information //! @@ -391,7 +391,7 @@ //! `clap`. You can either add it to the [examples/] directory, or file an issue and tell //! me. I'm all about giving credit where credit is due :) //! -//! Please read [CONTRIBUTING.md](https://raw.githubusercontent.com/clap-rs/clap/master/.github/CONTRIBUTING.md) before you start contributing. +//! Please read [CONTRIBUTING.md](https://github.com/clap-rs/clap/blob/v2.33.1/.github/CONTRIBUTING.md) before you start contributing. //! //! //! ### Testing Code @@ -512,12 +512,12 @@ //! `clap` is licensed under the MIT license. Please read the [LICENSE-MIT][license] file in //! this repository for more information. //! -//! [examples/]: https://github.com/clap-rs/clap/tree/master/examples +//! [examples/]: https://github.com/clap-rs/clap/tree/v2.33.1/examples //! [video tutorials]: https://www.youtube.com/playlist?list=PLza5oFLQGTl2Z5T8g1pRkIynR3E0_pc7U -//! [license]: https://raw.githubusercontent.com/clap-rs/clap/master/LICENSE-MIT +//! [license]: https://github.com/clap-rs/clap/blob/v2.33.1/LICENSE-MIT #![crate_type = "lib"] -#![doc(html_root_url = "https://docs.rs/clap/2.33.1")] +#![doc(html_root_url = "https://docs.rs/clap/2.33.3")] #![deny( missing_docs, missing_debug_implementations, diff -Nru cargo-0.44.1/vendor/clap/src/macros.rs cargo-0.47.0/vendor/clap/src/macros.rs --- cargo-0.44.1/vendor/clap/src/macros.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/clap/src/macros.rs 2020-10-01 21:38:28.000000000 +0000 @@ -466,6 +466,7 @@ macro_rules! 
crate_authors { ($sep:expr) => {{ use std::ops::Deref; + #[allow(deprecated)] use std::sync::{Once, ONCE_INIT}; #[allow(missing_copy_implementations)] @@ -479,6 +480,7 @@ #[allow(unsafe_code)] fn deref(&self) -> &'static str { + #[allow(deprecated)] static ONCE: Once = ONCE_INIT; static mut VALUE: *const String = 0 as *const String; diff -Nru cargo-0.44.1/vendor/core-foundation/.cargo-checksum.json cargo-0.47.0/vendor/core-foundation/.cargo-checksum.json --- cargo-0.44.1/vendor/core-foundation/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171"} \ No newline at end of file +{"files":{},"package":"0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/core-foundation/Cargo.toml cargo-0.47.0/vendor/core-foundation/Cargo.toml --- cargo-0.44.1/vendor/core-foundation/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "core-foundation" -version = "0.7.0" +version = "0.9.1" authors = ["The Servo Project Developers"] description = "Bindings to Core Foundation for macOS" homepage = "https://github.com/servo/core-foundation-rs" @@ -20,12 +20,14 @@ categories = ["os::macos-apis"] license = "MIT / Apache-2.0" repository = "https://github.com/servo/core-foundation-rs" +[package.metadata.docs.rs] +default-target = "x86_64-apple-darwin" [dependencies.chrono] version = "0.4" optional = true [dependencies.core-foundation-sys] -version = "0.7" +version = "0.8.0" [dependencies.libc] version = "0.2" diff -Nru cargo-0.44.1/vendor/core-foundation/.pc/update-dep-uuid-version.patch/Cargo.toml cargo-0.47.0/vendor/core-foundation/.pc/update-dep-uuid-version.patch/Cargo.toml --- cargo-0.44.1/vendor/core-foundation/.pc/update-dep-uuid-version.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/.pc/update-dep-uuid-version.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "core-foundation" -version = "0.7.0" +version = "0.9.1" authors = ["The Servo Project Developers"] description = "Bindings to Core Foundation for macOS" homepage = "https://github.com/servo/core-foundation-rs" @@ -20,12 +20,14 @@ categories = ["os::macos-apis"] license = "MIT / Apache-2.0" repository = "https://github.com/servo/core-foundation-rs" +[package.metadata.docs.rs] +default-target = "x86_64-apple-darwin" [dependencies.chrono] version = "0.4" optional = true [dependencies.core-foundation-sys] -version = "0.7" +version = "0.8.0" [dependencies.libc] version = "0.2" diff -Nru cargo-0.44.1/vendor/core-foundation/src/attributed_string.rs cargo-0.47.0/vendor/core-foundation/src/attributed_string.rs --- cargo-0.44.1/vendor/core-foundation/src/attributed_string.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/attributed_string.rs 2020-10-01 21:38:28.000000000 +0000 @@ -41,7 +41,7 @@ declare_TCFType!{ CFMutableAttributedString, CFMutableAttributedStringRef } -impl_TCFType!(CFMutableAttributedString, CFMutableAttributedStringRef, CFMutableAttributedStringGetTypeID); +impl_TCFType!(CFMutableAttributedString, CFMutableAttributedStringRef, CFAttributedStringGetTypeID); impl CFMutableAttributedString { #[inline] @@ -70,7 +70,7 @@ } #[inline] - pub fn 
set_attribute(&mut self, range: CFRange, name: CFStringRef, value: T) { + pub fn set_attribute(&mut self, range: CFRange, name: CFStringRef, value: &T) { unsafe { CFAttributedStringSetAttribute( self.0, range, name, value.as_CFTypeRef()); @@ -83,3 +83,16 @@ Self::new() } } + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn attributed_string_type_id_comparison() { + // CFMutableAttributedString TypeID must be equal to CFAttributedString TypeID. + // Compilation must not fail. + assert_eq!(::type_id(), ::type_id()); + } +} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/core-foundation/src/base.rs cargo-0.47.0/vendor/core-foundation/src/base.rs --- cargo-0.44.1/vendor/core-foundation/src/base.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/base.rs 2020-10-01 21:38:28.000000000 +0000 @@ -243,6 +243,7 @@ #[inline] unsafe fn wrap_under_get_rule(reference: CFTypeRef) -> CFType { + assert!(!reference.is_null(), "Attempted to create a NULL object."); let reference: CFTypeRef = CFRetain(reference); TCFType::wrap_under_create_rule(reference) } @@ -254,6 +255,7 @@ #[inline] unsafe fn wrap_under_create_rule(obj: CFTypeRef) -> CFType { + assert!(!obj.is_null(), "Attempted to create a NULL object."); CFType(obj) } diff -Nru cargo-0.44.1/vendor/core-foundation/src/bundle.rs cargo-0.47.0/vendor/core-foundation/src/bundle.rs --- cargo-0.44.1/vendor/core-foundation/src/bundle.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/bundle.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,8 +9,10 @@ //! Core Foundation Bundle Type -pub use core_foundation_sys::bundle::*; use core_foundation_sys::base::kCFAllocatorDefault; +pub use core_foundation_sys::bundle::*; +use core_foundation_sys::url::kCFURLPOSIXPathStyle; +use std::path::PathBuf; use base::{CFType, TCFType}; use url::CFURL; @@ -79,6 +81,24 @@ } } + /// Bundle's own location + pub fn bundle_url(&self) -> Option { + unsafe { + let bundle_url = CFBundleCopyBundleURL(self.0); + if bundle_url.is_null() { + None + } else { + Some(TCFType::wrap_under_create_rule(bundle_url)) + } + } + } + + /// Bundle's own location + pub fn path(&self) -> Option { + let url = self.bundle_url()?; + Some(PathBuf::from(url.get_file_system_path(kCFURLPOSIXPathStyle).to_string())) + } + pub fn private_frameworks_url(&self) -> Option { unsafe { let fw_url = CFBundleCopyPrivateFrameworksURL(self.0); diff -Nru cargo-0.44.1/vendor/core-foundation/src/lib.rs cargo-0.47.0/vendor/core-foundation/src/lib.rs --- cargo-0.44.1/vendor/core-foundation/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -95,6 +95,7 @@ #[inline] unsafe fn wrap_under_get_rule(reference: $ty_ref) -> Self { + assert!(!reference.is_null(), "Attempted to create a NULL object."); let reference = $crate::base::CFRetain(reference as *const ::std::os::raw::c_void) as $ty_ref; $crate::base::TCFType::wrap_under_create_rule(reference) } @@ -106,6 +107,7 @@ #[inline] unsafe fn wrap_under_create_rule(reference: $ty_ref) -> Self { + assert!(!reference.is_null(), "Attempted to create a NULL object."); // we need one PhantomData for each type parameter so call ourselves // again with @Phantom $p to produce that $ty(reference $(, impl_TCFType!(@Phantom $p))*) diff -Nru cargo-0.44.1/vendor/core-foundation/src/propertylist.rs cargo-0.47.0/vendor/core-foundation/src/propertylist.rs --- cargo-0.44.1/vendor/core-foundation/src/propertylist.rs 2020-05-27 
21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/propertylist.rs 2020-10-01 21:38:28.000000000 +0000 @@ -121,6 +121,7 @@ #[inline] pub unsafe fn wrap_under_get_rule(reference: CFPropertyListRef) -> CFPropertyList { + assert!(!reference.is_null(), "Attempted to create a NULL object."); let reference = CFRetain(reference); CFPropertyList(reference) } @@ -147,6 +148,7 @@ #[inline] pub unsafe fn wrap_under_create_rule(obj: CFPropertyListRef) -> CFPropertyList { + assert!(!obj.is_null(), "Attempted to create a NULL object."); CFPropertyList(obj) } @@ -262,7 +264,7 @@ let boo = CFString::from_static_string("Boo"); let foo = CFString::from_static_string("Foo"); let tru = CFBoolean::true_value(); - let n42 = CFNumber::from(42); + let n42 = CFNumber::from(1i64<<33); let dict1 = CFDictionary::from_CFType_pairs(&[(bar.as_CFType(), boo.as_CFType()), (baz.as_CFType(), tru.as_CFType()), diff -Nru cargo-0.44.1/vendor/core-foundation/src/set.rs cargo-0.47.0/vendor/core-foundation/src/set.rs --- cargo-0.44.1/vendor/core-foundation/src/set.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/set.rs 2020-10-01 21:38:28.000000000 +0000 @@ -42,3 +42,12 @@ } } } + +impl CFSet { + /// Get the number of elements in the CFSet + pub fn len(&self) -> usize { + unsafe { + CFSetGetCount(self.0) as usize + } + } +} diff -Nru cargo-0.44.1/vendor/core-foundation/src/string.rs cargo-0.47.0/vendor/core-foundation/src/string.rs --- cargo-0.44.1/vendor/core-foundation/src/string.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/string.rs 2020-10-01 21:38:28.000000000 +0000 @@ -141,6 +141,53 @@ } } +impl<'a> PartialEq<&'a str> for CFString { + fn eq(&self, other: &&str) -> bool { + unsafe { + let temp = CFStringCreateWithBytesNoCopy(kCFAllocatorDefault, + other.as_ptr(), + other.len().to_CFIndex(), + kCFStringEncodingUTF8, + false as Boolean, + kCFAllocatorNull); + self.eq(&CFString::wrap_under_create_rule(temp)) + } + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &CFString) -> bool { + other.eq(self) + } +} + +impl PartialEq for String { + #[inline] + fn eq(&self, other: &CFString) -> bool { + other.eq(&self.as_str()) + } +} + +impl PartialEq for CFString { + #[inline] + fn eq(&self, other: &String) -> bool { + self.eq(&other.as_str()) + } +} + +#[test] +fn str_cmp() { + let cfstr = CFString::new("hello"); + assert_eq!("hello", cfstr); + assert_eq!(cfstr, "hello"); + assert_ne!(cfstr, "wrong"); + assert_ne!("wrong", cfstr); + let hello = String::from("hello"); + assert_eq!(hello, cfstr); + assert_eq!(cfstr, hello); +} + #[test] fn string_and_back() { let original = "The quick brown fox jumped over the slow lazy dog."; diff -Nru cargo-0.44.1/vendor/core-foundation/src/timezone.rs cargo-0.47.0/vendor/core-foundation/src/timezone.rs --- cargo-0.44.1/vendor/core-foundation/src/timezone.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/timezone.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,6 +14,7 @@ use base::TCFType; use date::{CFDate, CFTimeInterval}; +use string::CFString; #[cfg(feature = "with-chrono")] use chrono::{FixedOffset, NaiveDateTime}; @@ -68,6 +69,14 @@ pub fn from_offset(offset: FixedOffset) -> CFTimeZone { CFTimeZone::new(offset.local_minus_utc() as f64) } + + /// The timezone database ID that identifies the time zone. E.g. "America/Los_Angeles" or + /// "Europe/Paris". 
+ pub fn name(&self) -> CFString { + unsafe { + CFString::wrap_under_get_rule(CFTimeZoneGetName(self.0)) + } + } } #[cfg(test)] diff -Nru cargo-0.44.1/vendor/core-foundation/src/url.rs cargo-0.47.0/vendor/core-foundation/src/url.rs --- cargo-0.44.1/vendor/core-foundation/src/url.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation/src/url.rs 2020-10-01 21:38:28.000000000 +0000 @@ -16,7 +16,6 @@ use core_foundation_sys::base::{kCFAllocatorDefault, Boolean}; use std::fmt; -use std::mem::MaybeUninit; use std::ptr; use std::path::{Path, PathBuf}; diff -Nru cargo-0.44.1/vendor/core-foundation-sys/.cargo-checksum.json cargo-0.47.0/vendor/core-foundation-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/core-foundation-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac"} \ No newline at end of file +{"files":{},"package":"c0af3b5e4601de3837c9332e29e0aae47a0d46ebfa246d12b82f564bac233393"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/core-foundation-sys/Cargo.toml cargo-0.47.0/vendor/core-foundation-sys/Cargo.toml --- cargo-0.44.1/vendor/core-foundation-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,13 +12,15 @@ [package] name = "core-foundation-sys" -version = "0.7.0" +version = "0.8.1" authors = ["The Servo Project Developers"] build = "build.rs" description = "Bindings to Core Foundation for macOS" homepage = "https://github.com/servo/core-foundation-rs" license = "MIT / Apache-2.0" repository = "https://github.com/servo/core-foundation-rs" +[package.metadata.docs.rs] +default-target = "x86_64-apple-darwin" [dependencies] diff -Nru cargo-0.44.1/vendor/core-foundation-sys/src/attributed_string.rs cargo-0.47.0/vendor/core-foundation-sys/src/attributed_string.rs --- cargo-0.44.1/vendor/core-foundation-sys/src/attributed_string.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/src/attributed_string.rs 2020-10-01 21:38:28.000000000 +0000 @@ -52,5 +52,4 @@ value: CFTypeRef, ); - pub fn CFMutableAttributedStringGetTypeID() -> CFTypeID; } diff -Nru cargo-0.44.1/vendor/core-foundation-sys/src/base.rs cargo-0.47.0/vendor/core-foundation-sys/src/base.rs --- cargo-0.44.1/vendor/core-foundation-sys/src/base.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/src/base.rs 2020-10-01 21:38:28.000000000 +0000 @@ -107,6 +107,9 @@ } } +/// Constant used by some functions to indicate failed searches. 
+pub static kCFNotFound: CFIndex = -1; + extern { /* * CFBase.h diff -Nru cargo-0.44.1/vendor/core-foundation-sys/src/bundle.rs cargo-0.47.0/vendor/core-foundation-sys/src/bundle.rs --- cargo-0.44.1/vendor/core-foundation-sys/src/bundle.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/src/bundle.rs 2020-10-01 21:38:28.000000000 +0000 @@ -34,4 +34,5 @@ pub fn CFBundleCopyExecutableURL(bundle: CFBundleRef) -> CFURLRef; pub fn CFBundleCopyPrivateFrameworksURL(bundle: CFBundleRef) -> CFURLRef; pub fn CFBundleCopySharedSupportURL(bundle: CFBundleRef) -> CFURLRef; + pub fn CFBundleCopyBundleURL(bundle: CFBundleRef) -> CFURLRef; } diff -Nru cargo-0.44.1/vendor/core-foundation-sys/src/set.rs cargo-0.47.0/vendor/core-foundation-sys/src/set.rs --- cargo-0.44.1/vendor/core-foundation-sys/src/set.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/src/set.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,7 +9,7 @@ use std::os::raw::c_void; -use base::{CFAllocatorRef, CFIndex, CFTypeID}; +use base::{CFAllocatorRef, CFIndex, CFTypeID, Boolean}; pub type CFSetApplierFunction = extern "C" fn (value: *const c_void, context: *const c_void); @@ -45,14 +45,22 @@ /* Creating Sets */ pub fn CFSetCreate(allocator: CFAllocatorRef, values: *const *const c_void, numValues: CFIndex, callBacks: *const CFSetCallBacks) -> CFSetRef; + pub fn CFSetCreateCopy(allocator: CFAllocatorRef, theSet: CFSetRef) -> CFSetRef; + + /* Examining a Set */ + pub fn CFSetContainsValue(theSet: CFSetRef, value: *const c_void) -> Boolean; + pub fn CFSetGetCount(theSet: CFSetRef) -> CFIndex; + pub fn CFSetGetCountOfValue(theSet: CFSetRef, value: *const c_void) -> CFIndex; + pub fn CFSetGetValue(theSet: CFSetRef, value: *const c_void) -> *const c_void; + pub fn CFSetGetValueIfPresent(theSet: CFSetRef, candidate: *const c_void, value: *mut *const c_void) -> Boolean; + pub fn CFSetGetValues(theSet: CFSetRef, values: *mut *const c_void); /* Applying a Function to Set Members */ pub fn CFSetApplyFunction(theSet: CFSetRef, applier: CFSetApplierFunction, context: *const c_void); - pub fn CFSetGetCount(theSet: CFSetRef) -> CFIndex; - + /* Getting the CFSet Type ID */ pub fn CFSetGetTypeID() -> CFTypeID; } diff -Nru cargo-0.44.1/vendor/core-foundation-sys/src/timezone.rs cargo-0.47.0/vendor/core-foundation-sys/src/timezone.rs --- cargo-0.44.1/vendor/core-foundation-sys/src/timezone.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/core-foundation-sys/src/timezone.rs 2020-10-01 21:38:28.000000000 +0000 @@ -11,6 +11,7 @@ use base::{CFAllocatorRef, CFTypeID}; use date::{CFTimeInterval, CFAbsoluteTime}; +use string::CFStringRef; #[repr(C)] pub struct __CFTimeZone(c_void); @@ -24,4 +25,5 @@ pub fn CFTimeZoneGetSecondsFromGMT(tz: CFTimeZoneRef, time: CFAbsoluteTime) -> CFTimeInterval; pub fn CFTimeZoneGetTypeID() -> CFTypeID; + pub fn CFTimeZoneGetName(tz: CFTimeZoneRef) -> CFStringRef; } diff -Nru cargo-0.44.1/vendor/crossbeam-channel/benches/crossbeam.rs cargo-0.47.0/vendor/crossbeam-channel/benches/crossbeam.rs --- cargo-0.44.1/vendor/crossbeam-channel/benches/crossbeam.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/benches/crossbeam.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,715 +0,0 @@ -#![feature(test)] - -extern crate crossbeam_channel; -extern crate crossbeam_utils; -extern crate num_cpus; -extern crate test; - -use crossbeam_channel::{bounded, unbounded}; -use crossbeam_utils::thread::scope; -use test::Bencher; - -const TOTAL_STEPS: usize = 
40_000; - -mod unbounded { - use super::*; - - #[bench] - fn create(b: &mut Bencher) { - b.iter(|| unbounded::()); - } - - #[bench] - fn oneshot(b: &mut Bencher) { - b.iter(|| { - let (s, r) = unbounded::(); - s.send(0).unwrap(); - r.recv().unwrap(); - }); - } - - #[bench] - fn inout(b: &mut Bencher) { - let (s, r) = unbounded::(); - b.iter(|| { - s.send(0).unwrap(); - r.recv().unwrap(); - }); - } - - #[bench] - fn par_inout(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} - -mod bounded_n { - use super::*; - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = bounded::(steps); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - 
scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(steps * threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(steps * threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn par_inout(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - assert_eq!(threads % 2, 0); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(steps * threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} - -mod bounded_1 { - use super::*; - - #[bench] - fn create(b: &mut Bencher) { - b.iter(|| bounded::(1)); - } - - #[bench] - fn oneshot(b: &mut Bencher) { - b.iter(|| { - let (s, r) = bounded::(1); - s.send(0).unwrap(); - r.recv().unwrap(); - }); - } - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); 
- }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} - -mod bounded_0 { - use super::*; - - #[bench] - fn create(b: &mut Bencher) { - b.iter(|| bounded::(0)); - } - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - 
s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/.cargo-checksum.json cargo-0.47.0/vendor/crossbeam-channel/.cargo-checksum.json --- cargo-0.44.1/vendor/crossbeam-channel/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{},"package":"cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/crossbeam-channel/Cargo.lock cargo-0.47.0/vendor/crossbeam-channel/Cargo.lock --- cargo-0.44.1/vendor/crossbeam-channel/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/Cargo.lock 1970-01-01 00:00:00.000000000 +0000 @@ -1,264 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "arc-swap" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.2" -dependencies = [ - "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "signal-hook 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = 
"hermit-abi" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libc" -version = "0.2.67" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "num_cpus" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "hermit-abi 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - 
"libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "signal-hook" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "signal-hook-registry" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d7b8a9123b8027467bce0099fe556c628a53c8d83df0507084c31e9ba2e39aff" -"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" -"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum hermit-abi 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)" = "e2c55f143919fbc0bc77e427fe2d74cf23786d7c1875666f2fde3ac3c659bb67" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" -"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" -"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum signal-hook 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "10b9f3a1686a29f53cfd91ee5e3db3c12313ec02d33765f02c1a9645a1811e2c" -"checksum signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff -Nru cargo-0.44.1/vendor/crossbeam-channel/Cargo.toml cargo-0.47.0/vendor/crossbeam-channel/Cargo.toml --- cargo-0.44.1/vendor/crossbeam-channel/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -# THIS FILE IS 
AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "crossbeam-channel" -version = "0.4.2" -authors = ["The Crossbeam Project Developers"] -description = "Multi-producer multi-consumer channels for message passing" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel" -documentation = "https://docs.rs/crossbeam-channel" -readme = "README.md" -keywords = ["channel", "mpmc", "select", "golang", "message"] -categories = ["algorithms", "concurrency", "data-structures"] -license = "MIT/Apache-2.0 AND BSD-2-Clause" -repository = "https://github.com/crossbeam-rs/crossbeam" -[dependencies.crossbeam-utils] -version = "0.7" - -[dependencies.maybe-uninit] -version = "2.0.0" -[dev-dependencies.num_cpus] -version = "1.10.0" - -[dev-dependencies.rand] -version = "0.6" - -[dev-dependencies.signal-hook] -version = "0.1.5" diff -Nru cargo-0.44.1/vendor/crossbeam-channel/CHANGELOG.md cargo-0.47.0/vendor/crossbeam-channel/CHANGELOG.md --- cargo-0.44.1/vendor/crossbeam-channel/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -# Version 0.4.2 - -- Fix bug in release (yanking 0.4.1) - -# Version 0.4.1 - -- Avoid time drift in `channel::tick`. (#456) -- Fix unsoundness issues by adopting `MaybeUninit`. (#458) - -# Version 0.4.0 - -- Bump the minimum required version to 1.28. -- Bump `crossbeam-utils` to `0.7`. - -# Version 0.3.9 - -- Fix a bug in reference counting. -- Optimize `recv_timeout()`. -- Add `Select::remove()`. -- Various small improvements, code cleanup, more tests. - -# Version 0.3.8 - -- Bump the minimum required version of `crossbeam-utils`. - -# Version 0.3.7 - -- Remove `parking_lot` and `rand` dependencies. -- Expand documentation. -- Implement `Default` for `Select`. -- Make `size_of::<Select<'_>>()` smaller. -- Several minor optimizations. -- Add more tests. - -# Version 0.3.6 - -- Fix a bug in initialization of unbounded channels. - -# Version 0.3.5 - -- New implementation for unbounded channels. -- A number of small performance improvements. -- Remove `crossbeam-epoch` dependency. - -# Version 0.3.4 - -- Bump `crossbeam-epoch` to `0.7`. -- Improve documentation. - -# Version 0.3.3 - -- Relax the lifetime in `SelectedOperation<'_>`. -- Add `Select::try_ready()`, `Select::ready()`, and `Select::ready_timeout()`. -- Update licensing notices. -- Improve documentation. -- Add methods `is_disconnected()`, `is_timeout()`, `is_empty()`, and `is_full()` on error types. - -# Version 0.3.2 - -- More elaborate licensing notices. - -# Version 0.3.1 - -- Update `crossbeam-utils` to `0.6`. - -# Version 0.3.0 - -- Add a special `never` channel type. -- Dropping all receivers now closes the channel. -- The interface of sending and receiving methods is now very similar to those in v0.1. -- The syntax for `send` in `select!` is now `send(sender, msg) -> res => body`. -- The syntax for `recv` in `select!` is now `recv(receiver) -> res => body`. 
-- New, more efficient interface for `Select` without callbacks. -- Timeouts can be specified in `select!`. - -# Version 0.2.6 - -- `Select` struct that can add cases dynamically. -- More documentation (in particular, the FAQ section). -- Optimize contended sends/receives in unbounded channels. - -# Version 0.2.5 - -- Use `LocalKey::try_with` instead of `LocalKey::with`. -- Remove helper macros `__crossbeam_channel*`. - -# Version 0.2.4 - -- Make `select!` linearizable with other channel operations. -- Update `crossbeam-utils` to `0.5.0`. -- Update `parking_lot` to `0.6.3`. -- Remove Mac OS X tests. - -# Version 0.2.3 - -- Add Mac OS X tests. -- Lower some memory orderings. -- Eliminate calls to `mem::unitialized`, which caused bugs with ZST. - -# Version 0.2.2 - -- Add more tests. -- Update `crossbeam-epoch` to 0.5.0 -- Initialize the RNG seed to a random value. -- Replace `libc::abort` with `std::process::abort`. -- Ignore clippy warnings in `select!`. -- Better interaction of `select!` with the NLL borrow checker. - -# Version 0.2.1 - -- Fix compilation errors when using `select!` with `#[deny(unsafe_code)]`. - -# Version 0.2.0 - -- Implement `IntoIterator` for `Receiver`. -- Add a new `select!` macro. -- Add special channels `after` and `tick`. -- Dropping receivers doesn't close the channel anymore. -- Change the signature of `recv`, `send`, and `try_recv`. -- Remove `Sender::is_closed` and `Receiver::is_closed`. -- Remove `Sender::close` and `Receiver::close`. -- Remove `Sender::send_timeout` and `Receiver::recv_timeout`. -- Remove `Sender::try_send`. -- Remove `Select` and `select_loop!`. -- Remove all error types. -- Remove `Iter`, `TryIter`, and `IntoIter`. -- Remove the `nightly` feature. -- Remove ordering operators for `Sender` and `Receiver`. - -# Version 0.1.3 - -- Add `Sender::disconnect` and `Receiver::disconnect`. -- Implement comparison operators for `Sender` and `Receiver`. -- Allow arbitrary patterns in place of `msg` in `recv(r, msg)`. -- Add a few conversion impls between error types. -- Add benchmarks for `atomicring` and `mpmc`. -- Add benchmarks for different message sizes. -- Documentation improvements. -- Update `crossbeam-epoch` to 0.4.0 -- Update `crossbeam-utils` to 0.3.0 -- Update `parking_lot` to 0.5 -- Update `rand` to 0.4 - -# Version 0.1.2 - -- Allow conditional cases in `select_loop!` macro. -- Fix typos in documentation. -- Fix deadlock in selection when all channels are disconnected and a timeout is specified. - -# Version 0.1.1 - -- Implement `Debug` for `Sender`, `Receiver`, `Iter`, `TryIter`, `IntoIter`, and `Select`. -- Implement `Default` for `Select`. - -# Version 0.1.0 - -- First implementation of the channels. -- Add `select_loop!` macro by @TimNN. diff -Nru cargo-0.44.1/vendor/crossbeam-channel/examples/fibonacci.rs cargo-0.47.0/vendor/crossbeam-channel/examples/fibonacci.rs --- cargo-0.44.1/vendor/crossbeam-channel/examples/fibonacci.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/examples/fibonacci.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -//! An asynchronous fibonacci sequence generator. - -extern crate crossbeam_channel; - -use std::thread; - -use crossbeam_channel::{bounded, Sender}; - -// Sends the Fibonacci sequence into the channel until it becomes disconnected. 
-fn fibonacci(sender: Sender) { - let (mut x, mut y) = (0, 1); - while sender.send(x).is_ok() { - let tmp = x; - x = y; - y = tmp + y; - } -} - -fn main() { - let (s, r) = bounded(0); - thread::spawn(|| fibonacci(s)); - - // Print the first 20 Fibonacci numbers. - for num in r.iter().take(20) { - println!("{}", num); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/examples/matching.rs cargo-0.47.0/vendor/crossbeam-channel/examples/matching.rs --- cargo-0.44.1/vendor/crossbeam-channel/examples/matching.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/examples/matching.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -//! Using `select!` to send and receive on the same channel at the same time. -//! -//! This example is based on the following program in Go. -//! -//! Source: -//! - https://web.archive.org/web/20171209034309/https://www.nada.kth.se/~snilsson/concurrency -//! - http://www.nada.kth.se/~snilsson/concurrency/src/matching.go -//! -//! Copyright & License: -//! - Stefan Nilsson -//! - Creative Commons Attribution 3.0 Unported License -//! - https://creativecommons.org/licenses/by/3.0/ -//! -//! ```go -//! func main() { -//! people := []string{"Anna", "Bob", "Cody", "Dave", "Eva"} -//! match := make(chan string, 1) // Make room for one unmatched send. -//! wg := new(sync.WaitGroup) -//! for _, name := range people { -//! wg.Add(1) -//! go Seek(name, match, wg) -//! } -//! wg.Wait() -//! select { -//! case name := <-match: -//! fmt.Printf("No one received %s’s message.\n", name) -//! default: -//! // There was no pending send operation. -//! } -//! } -//! -//! // Seek either sends or receives, whichever possible, a name on the match -//! // channel and notifies the wait group when done. -//! func Seek(name string, match chan string, wg *sync.WaitGroup) { -//! select { -//! case peer := <-match: -//! fmt.Printf("%s received a message from %s.\n", name, peer) -//! case match <- name: -//! // Wait for someone to receive my message. -//! } -//! wg.Done() -//! } -//! ``` - -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; - -use crossbeam_channel::bounded; -use crossbeam_utils::thread; - -fn main() { - let people = vec!["Anna", "Bob", "Cody", "Dave", "Eva"]; - let (s, r) = bounded(1); // Make room for one unmatched send. - - // Either send my name into the channel or receive someone else's, whatever happens first. - let seek = |name, s, r| { - select! { - recv(r) -> peer => println!("{} received a message from {}.", name, peer.unwrap()), - send(s, name) -> _ => {}, // Wait for someone to receive my message. - } - }; - - thread::scope(|scope| { - for name in people { - let (s, r) = (s.clone(), r.clone()); - scope.spawn(move |_| seek(name, s, r)); - } - }) - .unwrap(); - - // Check if there is a pending send operation. - if let Ok(name) = r.try_recv() { - println!("No one received {}’s message.", name); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/examples/stopwatch.rs cargo-0.47.0/vendor/crossbeam-channel/examples/stopwatch.rs --- cargo-0.44.1/vendor/crossbeam-channel/examples/stopwatch.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/examples/stopwatch.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -//! Prints the elapsed time every 1 second and quits on Ctrl+C. 
- -#[macro_use] -extern crate crossbeam_channel; -extern crate signal_hook; - -use std::io; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{bounded, tick, Receiver}; -use signal_hook::iterator::Signals; -use signal_hook::SIGINT; - -// Creates a channel that gets a message every time `SIGINT` is signalled. -fn sigint_notifier() -> io::Result<Receiver<()>> { - let (s, r) = bounded(100); - let signals = Signals::new(&[SIGINT])?; - - thread::spawn(move || { - for _ in signals.forever() { - if s.send(()).is_err() { - break; - } - } - }); - - Ok(r) -} - -// Prints the elapsed time. -fn show(dur: Duration) { - println!( - "Elapsed: {}.{:03} sec", - dur.as_secs(), - dur.subsec_nanos() / 1_000_000 - ); -} - -fn main() { - let start = Instant::now(); - let update = tick(Duration::from_secs(1)); - let ctrl_c = sigint_notifier().unwrap(); - - loop { - select! { - recv(update) -> _ => { - show(start.elapsed()); - } - recv(ctrl_c) -> _ => { - println!(); - println!("Goodbye!"); - show(start.elapsed()); - break; - } - } - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/LICENSE-APACHE cargo-0.47.0/vendor/crossbeam-channel/LICENSE-APACHE --- cargo-0.44.1/vendor/crossbeam-channel/LICENSE-APACHE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.44.1/vendor/crossbeam-channel/LICENSE-MIT cargo-0.47.0/vendor/crossbeam-channel/LICENSE-MIT --- cargo-0.44.1/vendor/crossbeam-channel/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/crossbeam-channel/LICENSE-THIRD-PARTY cargo-0.47.0/vendor/crossbeam-channel/LICENSE-THIRD-PARTY --- cargo-0.44.1/vendor/crossbeam-channel/LICENSE-THIRD-PARTY 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/LICENSE-THIRD-PARTY 1970-01-01 00:00:00.000000000 +0000 @@ -1,625 +0,0 @@ -=============================================================================== - -Bounded MPMC queue -http://www.1024cores.net/home/code-license - -Copyright (c) 2010-2011 Dmitry Vyukov. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of Dmitry Vyukov. - -=============================================================================== - -matching.go -https://creativecommons.org/licenses/by/3.0/legalcode - -Creative Commons Legal Code - -Attribution 3.0 Unported - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR - DAMAGES RESULTING FROM ITS USE. - -License - -THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE -COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY -COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS -AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. - -BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE -TO BE BOUND BY THE TERMS OF THIS LICENSE. 
TO THE EXTENT THIS LICENSE MAY -BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS -CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND -CONDITIONS. - -1. Definitions - - a. "Adaptation" means a work based upon the Work, or upon the Work and - other pre-existing works, such as a translation, adaptation, - derivative work, arrangement of music or other alterations of a - literary or artistic work, or phonogram or performance and includes - cinematographic adaptations or any other form in which the Work may be - recast, transformed, or adapted including in any form recognizably - derived from the original, except that a work that constitutes a - Collection will not be considered an Adaptation for the purpose of - this License. For the avoidance of doubt, where the Work is a musical - work, performance or phonogram, the synchronization of the Work in - timed-relation with a moving image ("synching") will be considered an - Adaptation for the purpose of this License. - b. "Collection" means a collection of literary or artistic works, such as - encyclopedias and anthologies, or performances, phonograms or - broadcasts, or other works or subject matter other than works listed - in Section 1(f) below, which, by reason of the selection and - arrangement of their contents, constitute intellectual creations, in - which the Work is included in its entirety in unmodified form along - with one or more other contributions, each constituting separate and - independent works in themselves, which together are assembled into a - collective whole. A work that constitutes a Collection will not be - considered an Adaptation (as defined above) for the purposes of this - License. - c. "Distribute" means to make available to the public the original and - copies of the Work or Adaptation, as appropriate, through sale or - other transfer of ownership. - d. "Licensor" means the individual, individuals, entity or entities that - offer(s) the Work under the terms of this License. - e. "Original Author" means, in the case of a literary or artistic work, - the individual, individuals, entity or entities who created the Work - or if no individual or entity can be identified, the publisher; and in - addition (i) in the case of a performance the actors, singers, - musicians, dancers, and other persons who act, sing, deliver, declaim, - play in, interpret or otherwise perform literary or artistic works or - expressions of folklore; (ii) in the case of a phonogram the producer - being the person or legal entity who first fixes the sounds of a - performance or other sounds; and, (iii) in the case of broadcasts, the - organization that transmits the broadcast. - f. 
"Work" means the literary and/or artistic work offered under the terms - of this License including without limitation any production in the - literary, scientific and artistic domain, whatever may be the mode or - form of its expression including digital form, such as a book, - pamphlet and other writing; a lecture, address, sermon or other work - of the same nature; a dramatic or dramatico-musical work; a - choreographic work or entertainment in dumb show; a musical - composition with or without words; a cinematographic work to which are - assimilated works expressed by a process analogous to cinematography; - a work of drawing, painting, architecture, sculpture, engraving or - lithography; a photographic work to which are assimilated works - expressed by a process analogous to photography; a work of applied - art; an illustration, map, plan, sketch or three-dimensional work - relative to geography, topography, architecture or science; a - performance; a broadcast; a phonogram; a compilation of data to the - extent it is protected as a copyrightable work; or a work performed by - a variety or circus performer to the extent it is not otherwise - considered a literary or artistic work. - g. "You" means an individual or entity exercising rights under this - License who has not previously violated the terms of this License with - respect to the Work, or who has received express permission from the - Licensor to exercise rights under this License despite a previous - violation. - h. "Publicly Perform" means to perform public recitations of the Work and - to communicate to the public those public recitations, by any means or - process, including by wire or wireless means or public digital - performances; to make available to the public Works in such a way that - members of the public may access these Works from a place and at a - place individually chosen by them; to perform the Work to the public - by any means or process and the communication to the public of the - performances of the Work, including by public digital performance; to - broadcast and rebroadcast the Work by any means including signs, - sounds or images. - i. "Reproduce" means to make copies of the Work by any means including - without limitation by sound or visual recordings and the right of - fixation and reproducing fixations of the Work, including storage of a - protected performance or phonogram in digital form or other electronic - medium. - -2. Fair Dealing Rights. Nothing in this License is intended to reduce, -limit, or restrict any uses free from copyright or rights arising from -limitations or exceptions that are provided for in connection with the -copyright protection under copyright law or other applicable laws. - -3. License Grant. Subject to the terms and conditions of this License, -Licensor hereby grants You a worldwide, royalty-free, non-exclusive, -perpetual (for the duration of the applicable copyright) license to -exercise the rights in the Work as stated below: - - a. to Reproduce the Work, to incorporate the Work into one or more - Collections, and to Reproduce the Work as incorporated in the - Collections; - b. to create and Reproduce Adaptations provided that any such Adaptation, - including any translation in any medium, takes reasonable steps to - clearly label, demarcate or otherwise identify that changes were made - to the original Work. 
For example, a translation could be marked "The - original work was translated from English to Spanish," or a - modification could indicate "The original work has been modified."; - c. to Distribute and Publicly Perform the Work including as incorporated - in Collections; and, - d. to Distribute and Publicly Perform Adaptations. - e. For the avoidance of doubt: - - i. Non-waivable Compulsory License Schemes. In those jurisdictions in - which the right to collect royalties through any statutory or - compulsory licensing scheme cannot be waived, the Licensor - reserves the exclusive right to collect such royalties for any - exercise by You of the rights granted under this License; - ii. Waivable Compulsory License Schemes. In those jurisdictions in - which the right to collect royalties through any statutory or - compulsory licensing scheme can be waived, the Licensor waives the - exclusive right to collect such royalties for any exercise by You - of the rights granted under this License; and, - iii. Voluntary License Schemes. The Licensor waives the right to - collect royalties, whether individually or, in the event that the - Licensor is a member of a collecting society that administers - voluntary licensing schemes, via that society, from any exercise - by You of the rights granted under this License. - -The above rights may be exercised in all media and formats whether now -known or hereafter devised. The above rights include the right to make -such modifications as are technically necessary to exercise the rights in -other media and formats. Subject to Section 8(f), all rights not expressly -granted by Licensor are hereby reserved. - -4. Restrictions. The license granted in Section 3 above is expressly made -subject to and limited by the following restrictions: - - a. You may Distribute or Publicly Perform the Work only under the terms - of this License. You must include a copy of, or the Uniform Resource - Identifier (URI) for, this License with every copy of the Work You - Distribute or Publicly Perform. You may not offer or impose any terms - on the Work that restrict the terms of this License or the ability of - the recipient of the Work to exercise the rights granted to that - recipient under the terms of the License. You may not sublicense the - Work. You must keep intact all notices that refer to this License and - to the disclaimer of warranties with every copy of the Work You - Distribute or Publicly Perform. When You Distribute or Publicly - Perform the Work, You may not impose any effective technological - measures on the Work that restrict the ability of a recipient of the - Work from You to exercise the rights granted to that recipient under - the terms of the License. This Section 4(a) applies to the Work as - incorporated in a Collection, but this does not require the Collection - apart from the Work itself to be made subject to the terms of this - License. If You create a Collection, upon notice from any Licensor You - must, to the extent practicable, remove from the Collection any credit - as required by Section 4(b), as requested. If You create an - Adaptation, upon notice from any Licensor You must, to the extent - practicable, remove from the Adaptation any credit as required by - Section 4(b), as requested. - b. 
If You Distribute, or Publicly Perform the Work or any Adaptations or - Collections, You must, unless a request has been made pursuant to - Section 4(a), keep intact all copyright notices for the Work and - provide, reasonable to the medium or means You are utilizing: (i) the - name of the Original Author (or pseudonym, if applicable) if supplied, - and/or if the Original Author and/or Licensor designate another party - or parties (e.g., a sponsor institute, publishing entity, journal) for - attribution ("Attribution Parties") in Licensor's copyright notice, - terms of service or by other reasonable means, the name of such party - or parties; (ii) the title of the Work if supplied; (iii) to the - extent reasonably practicable, the URI, if any, that Licensor - specifies to be associated with the Work, unless such URI does not - refer to the copyright notice or licensing information for the Work; - and (iv) , consistent with Section 3(b), in the case of an Adaptation, - a credit identifying the use of the Work in the Adaptation (e.g., - "French translation of the Work by Original Author," or "Screenplay - based on original Work by Original Author"). The credit required by - this Section 4 (b) may be implemented in any reasonable manner; - provided, however, that in the case of a Adaptation or Collection, at - a minimum such credit will appear, if a credit for all contributing - authors of the Adaptation or Collection appears, then as part of these - credits and in a manner at least as prominent as the credits for the - other contributing authors. For the avoidance of doubt, You may only - use the credit required by this Section for the purpose of attribution - in the manner set out above and, by exercising Your rights under this - License, You may not implicitly or explicitly assert or imply any - connection with, sponsorship or endorsement by the Original Author, - Licensor and/or Attribution Parties, as appropriate, of You or Your - use of the Work, without the separate, express prior written - permission of the Original Author, Licensor and/or Attribution - Parties. - c. Except as otherwise agreed in writing by the Licensor or as may be - otherwise permitted by applicable law, if You Reproduce, Distribute or - Publicly Perform the Work either by itself or as part of any - Adaptations or Collections, You must not distort, mutilate, modify or - take other derogatory action in relation to the Work which would be - prejudicial to the Original Author's honor or reputation. Licensor - agrees that in those jurisdictions (e.g. Japan), in which any exercise - of the right granted in Section 3(b) of this License (the right to - make Adaptations) would be deemed to be a distortion, mutilation, - modification or other derogatory action prejudicial to the Original - Author's honor and reputation, the Licensor will waive or not assert, - as appropriate, this Section, to the fullest extent permitted by the - applicable national law, to enable You to reasonably exercise Your - right under Section 3(b) of this License (right to make Adaptations) - but not otherwise. - -5. 
Representations, Warranties and Disclaimer - -UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR -OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY -KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, -INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, -FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF -LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, -WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION -OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. - -6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE -LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR -ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES -ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS -BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. Termination - - a. This License and the rights granted hereunder will terminate - automatically upon any breach by You of the terms of this License. - Individuals or entities who have received Adaptations or Collections - from You under this License, however, will not have their licenses - terminated provided such individuals or entities remain in full - compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will - survive any termination of this License. - b. Subject to the above terms and conditions, the license granted here is - perpetual (for the duration of the applicable copyright in the Work). - Notwithstanding the above, Licensor reserves the right to release the - Work under different license terms or to stop distributing the Work at - any time; provided, however that any such election will not serve to - withdraw this License (or any other license that has been, or is - required to be, granted under the terms of this License), and this - License will continue in full force and effect unless terminated as - stated above. - -8. Miscellaneous - - a. Each time You Distribute or Publicly Perform the Work or a Collection, - the Licensor offers to the recipient a license to the Work on the same - terms and conditions as the license granted to You under this License. - b. Each time You Distribute or Publicly Perform an Adaptation, Licensor - offers to the recipient a license to the original Work on the same - terms and conditions as the license granted to You under this License. - c. If any provision of this License is invalid or unenforceable under - applicable law, it shall not affect the validity or enforceability of - the remainder of the terms of this License, and without further action - by the parties to this agreement, such provision shall be reformed to - the minimum extent necessary to make such provision valid and - enforceable. - d. No term or provision of this License shall be deemed waived and no - breach consented to unless such waiver or consent shall be in writing - and signed by the party to be charged with such waiver or consent. - e. This License constitutes the entire agreement between the parties with - respect to the Work licensed here. There are no understandings, - agreements or representations with respect to the Work not specified - here. Licensor shall not be bound by any additional provisions that - may appear in any communication from You. This License may not be - modified without the mutual written agreement of the Licensor and You. - f. 
The rights granted under, and the subject matter referenced, in this - License were drafted utilizing the terminology of the Berne Convention - for the Protection of Literary and Artistic Works (as amended on - September 28, 1979), the Rome Convention of 1961, the WIPO Copyright - Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 - and the Universal Copyright Convention (as revised on July 24, 1971). - These rights and subject matter take effect in the relevant - jurisdiction in which the License terms are sought to be enforced - according to the corresponding provisions of the implementation of - those treaty provisions in the applicable national law. If the - standard suite of rights granted under applicable copyright law - includes additional rights not granted under this License, such - additional rights are deemed to be included in the License; this - License is not intended to restrict the license of any rights under - applicable law. - - -Creative Commons Notice - - Creative Commons is not a party to this License, and makes no warranty - whatsoever in connection with the Work. Creative Commons will not be - liable to You or any party on any legal theory for any damages - whatsoever, including without limitation any general, special, - incidental or consequential damages arising in connection to this - license. Notwithstanding the foregoing two (2) sentences, if Creative - Commons has expressly identified itself as the Licensor hereunder, it - shall have all rights and obligations of Licensor. - - Except for the limited purpose of indicating to the public that the - Work is licensed under the CCPL, Creative Commons does not authorize - the use by either party of the trademark "Creative Commons" or any - related trademark or logo of Creative Commons without the prior - written consent of Creative Commons. Any permitted use will be in - compliance with Creative Commons' then-current trademark usage - guidelines, as may be published on its website or otherwise made - available upon request from time to time. For the avoidance of doubt, - this trademark restriction does not form part of this License. - - Creative Commons may be contacted at https://creativecommons.org/. - -=============================================================================== - -The Go Programming Language -https://golang.org/LICENSE - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -=============================================================================== - -The Rust Programming Language -https://github.com/rust-lang/rust/blob/master/LICENSE-MIT - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - -=============================================================================== - -The Rust Programming Language -https://github.com/rust-lang/rust/blob/master/LICENSE-APACHE - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru cargo-0.44.1/vendor/crossbeam-channel/README.md cargo-0.47.0/vendor/crossbeam-channel/README.md --- cargo-0.44.1/vendor/crossbeam-channel/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -# Crossbeam Channel - -[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)]( -https://travis-ci.org/crossbeam-rs/crossbeam) -[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam-channel) -[![Cargo](https://img.shields.io/crates/v/crossbeam-channel.svg)]( -https://crates.io/crates/crossbeam-channel) -[![Documentation](https://docs.rs/crossbeam-channel/badge.svg)]( -https://docs.rs/crossbeam-channel) -[![Rust 1.28+](https://img.shields.io/badge/rust-1.28+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.gg/BBYwKq) - -This crate provides multi-producer multi-consumer channels for message passing. -It is an alternative to [`std::sync::mpsc`] with more features and better performance. - -Some highlights: - -* [`Sender`]s and [`Receiver`]s can be cloned and shared among threads. -* Two main kinds of channels are [`bounded`] and [`unbounded`]. -* Convenient extra channels like [`after`], [`never`], and [`tick`]. -* The [`select!`] macro can block on multiple channel operations. -* [`Select`] can select over a dynamically built list of channel operations. -* Channels use locks very sparingly for maximum [performance](benchmarks). - -[`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html -[`Sender`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Sender.html -[`Receiver`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Receiver.html -[`bounded`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.bounded.html -[`unbounded`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.unbounded.html -[`after`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.after.html -[`never`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.never.html -[`tick`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.tick.html -[`select!`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/macro.select.html -[`Select`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Select.html - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-channel = "0.4" -``` - -Next, add this to your crate: - -```rust -#[macro_use] -extern crate crossbeam_channel; -``` - -## Compatibility - -The minimum supported Rust version is 1.28. Any change to this is considered a breaking change. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. 
- -#### Third party software - -This product includes copies and modifications of software developed by third parties: - -* [examples/matching.rs](examples/matching.rs) includes - [matching.go](http://www.nada.kth.se/~snilsson/concurrency/src/matching.go) by Stefan Nilsson, - licensed under Creative Commons Attribution 3.0 Unported License. - -* [src/flavors/array.rs](src/flavors/array.rs) is based on - [Bounded MPMC queue](http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue) - by Dmitry Vyukov, licensed under the Simplified BSD License and the Apache License, Version 2.0. - -* [tests/mpsc.rs](tests/mpsc.rs) includes modifications of code from The Rust Programming Language, - licensed under the MIT License and the Apache License, Version 2.0. - -* [tests/golang.rs](tests/golang.rs) is based on code from The Go Programming Language, licensed - under the 3-Clause BSD License. - -See the source code files for more details. - -Copies of third party licenses can be found in [LICENSE-THIRD-PARTY](LICENSE-THIRD-PARTY). diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/channel.rs cargo-0.47.0/vendor/crossbeam-channel/src/channel.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/channel.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/channel.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1389 +0,0 @@ -//! The channel interface. - -use std::fmt; -use std::iter::FusedIterator; -use std::mem; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::sync::Arc; -use std::time::{Duration, Instant}; - -use context::Context; -use counter; -use err::{RecvError, RecvTimeoutError, SendError, SendTimeoutError, TryRecvError, TrySendError}; -use flavors; -use select::{Operation, SelectHandle, Token}; - -/// Creates a channel of unbounded capacity. -/// -/// This channel has a growable buffer that can hold any number of messages at a time. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// // Computes the n-th Fibonacci number. -/// fn fib(n: i32) -> i32 { -/// if n <= 1 { -/// n -/// } else { -/// fib(n - 1) + fib(n - 2) -/// } -/// } -/// -/// // Spawn an asynchronous computation. -/// thread::spawn(move || s.send(fib(20)).unwrap()); -/// -/// // Print the result of the computation. -/// println!("{}", r.recv().unwrap()); -/// ``` -pub fn unbounded() -> (Sender, Receiver) { - let (s, r) = counter::new(flavors::list::Channel::new()); - let s = Sender { - flavor: SenderFlavor::List(s), - }; - let r = Receiver { - flavor: ReceiverFlavor::List(r), - }; - (s, r) -} - -/// Creates a channel of bounded capacity. -/// -/// This channel has a buffer that can hold at most `cap` messages at a time. -/// -/// A special case is zero-capacity channel, which cannot hold any messages. Instead, send and -/// receive operations must appear at the same time in order to pair up and pass the message over. -/// -/// # Examples -/// -/// A channel of capacity 1: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::bounded; -/// -/// let (s, r) = bounded(1); -/// -/// // This call returns immediately because there is enough space in the channel. -/// s.send(1).unwrap(); -/// -/// thread::spawn(move || { -/// // This call blocks the current thread because the channel is full. -/// // It will be able to complete only after the first message is received. 
-/// s.send(2).unwrap(); -/// }); -/// -/// thread::sleep(Duration::from_secs(1)); -/// assert_eq!(r.recv(), Ok(1)); -/// assert_eq!(r.recv(), Ok(2)); -/// ``` -/// -/// A zero-capacity channel: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::bounded; -/// -/// let (s, r) = bounded(0); -/// -/// thread::spawn(move || { -/// // This call blocks the current thread until a receive operation appears -/// // on the other side of the channel. -/// s.send(1).unwrap(); -/// }); -/// -/// thread::sleep(Duration::from_secs(1)); -/// assert_eq!(r.recv(), Ok(1)); -/// ``` -pub fn bounded(cap: usize) -> (Sender, Receiver) { - if cap == 0 { - let (s, r) = counter::new(flavors::zero::Channel::new()); - let s = Sender { - flavor: SenderFlavor::Zero(s), - }; - let r = Receiver { - flavor: ReceiverFlavor::Zero(r), - }; - (s, r) - } else { - let (s, r) = counter::new(flavors::array::Channel::with_capacity(cap)); - let s = Sender { - flavor: SenderFlavor::Array(s), - }; - let r = Receiver { - flavor: ReceiverFlavor::Array(r), - }; - (s, r) - } -} - -/// Creates a receiver that delivers a message after a certain duration of time. -/// -/// The channel is bounded with capacity of 1 and never gets disconnected. Exactly one message will -/// be sent into the channel after `duration` elapses. The message is the instant at which it is -/// sent. -/// -/// # Examples -/// -/// Using an `after` channel for timeouts: -/// -/// ``` -/// # #[macro_use] -/// # extern crate crossbeam_channel; -/// # fn main() { -/// use std::time::Duration; -/// use crossbeam_channel::{after, unbounded}; -/// -/// let (s, r) = unbounded::(); -/// let timeout = Duration::from_millis(100); -/// -/// select! { -/// recv(r) -> msg => println!("received {:?}", msg), -/// recv(after(timeout)) -> _ => println!("timed out"), -/// } -/// # } -/// ``` -/// -/// When the message gets sent: -/// -/// ``` -/// use std::thread; -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::after; -/// -/// // Converts a number of milliseconds into a `Duration`. -/// let ms = |ms| Duration::from_millis(ms); -/// -/// // Returns `true` if `a` and `b` are very close `Instant`s. -/// let eq = |a, b| a + ms(50) > b && b + ms(50) > a; -/// -/// let start = Instant::now(); -/// let r = after(ms(100)); -/// -/// thread::sleep(ms(500)); -/// -/// // This message was sent 100 ms from the start and received 500 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(100))); -/// assert!(eq(Instant::now(), start + ms(500))); -/// ``` -pub fn after(duration: Duration) -> Receiver { - Receiver { - flavor: ReceiverFlavor::After(Arc::new(flavors::after::Channel::new(duration))), - } -} - -/// Creates a receiver that never delivers messages. -/// -/// The channel is bounded with capacity of 0 and never gets disconnected. -/// -/// # Examples -/// -/// Using a `never` channel to optionally add a timeout to [`select!`]: -/// -/// ``` -/// # #[macro_use] -/// # extern crate crossbeam_channel; -/// # fn main() { -/// use std::thread; -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::{after, never, unbounded}; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s.send(1).unwrap(); -/// }); -/// -/// // Suppose this duration can be a `Some` or a `None`. -/// let duration = Some(Duration::from_millis(100)); -/// -/// // Create a channel that times out after the specified duration. 
-/// let timeout = duration -/// .map(|d| after(d)) -/// .unwrap_or(never()); -/// -/// select! { -/// recv(r) -> msg => assert_eq!(msg, Ok(1)), -/// recv(timeout) -> _ => println!("timed out"), -/// } -/// # } -/// ``` -/// -/// [`select!`]: macro.select.html -pub fn never() -> Receiver { - Receiver { - flavor: ReceiverFlavor::Never(flavors::never::Channel::new()), - } -} - -/// Creates a receiver that delivers messages periodically. -/// -/// The channel is bounded with capacity of 1 and never gets disconnected. Messages will be -/// sent into the channel in intervals of `duration`. Each message is the instant at which it is -/// sent. -/// -/// # Examples -/// -/// Using a `tick` channel to periodically print elapsed time: -/// -/// ``` -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::tick; -/// -/// let start = Instant::now(); -/// let ticker = tick(Duration::from_millis(100)); -/// -/// for _ in 0..5 { -/// ticker.recv().unwrap(); -/// println!("elapsed: {:?}", start.elapsed()); -/// } -/// ``` -/// -/// When messages get sent: -/// -/// ``` -/// use std::thread; -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::tick; -/// -/// // Converts a number of milliseconds into a `Duration`. -/// let ms = |ms| Duration::from_millis(ms); -/// -/// // Returns `true` if `a` and `b` are very close `Instant`s. -/// let eq = |a, b| a + ms(50) > b && b + ms(50) > a; -/// -/// let start = Instant::now(); -/// let r = tick(ms(100)); -/// -/// // This message was sent 100 ms from the start and received 100 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(100))); -/// assert!(eq(Instant::now(), start + ms(100))); -/// -/// thread::sleep(ms(500)); -/// -/// // This message was sent 200 ms from the start and received 600 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(200))); -/// assert!(eq(Instant::now(), start + ms(600))); -/// -/// // This message was sent 700 ms from the start and received 700 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(700))); -/// assert!(eq(Instant::now(), start + ms(700))); -/// ``` -pub fn tick(duration: Duration) -> Receiver { - Receiver { - flavor: ReceiverFlavor::Tick(Arc::new(flavors::tick::Channel::new(duration))), - } -} - -/// The sending side of a channel. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s1, r) = unbounded(); -/// let s2 = s1.clone(); -/// -/// thread::spawn(move || s1.send(1).unwrap()); -/// thread::spawn(move || s2.send(2).unwrap()); -/// -/// let msg1 = r.recv().unwrap(); -/// let msg2 = r.recv().unwrap(); -/// -/// assert_eq!(msg1 + msg2, 3); -/// ``` -pub struct Sender { - flavor: SenderFlavor, -} - -/// Sender flavors. -enum SenderFlavor { - /// Bounded channel based on a preallocated array. - Array(counter::Sender>), - - /// Unbounded channel implemented as a linked list. - List(counter::Sender>), - - /// Zero-capacity channel. - Zero(counter::Sender>), -} - -unsafe impl Send for Sender {} -unsafe impl Sync for Sender {} - -impl UnwindSafe for Sender {} -impl RefUnwindSafe for Sender {} - -impl Sender { - /// Attempts to send a message into the channel without blocking. - /// - /// This method will either send a message into the channel immediately or return an error if - /// the channel is full or disconnected. The returned error contains the original message. 
- /// - /// If called on a zero-capacity channel, this method will send the message only if there - /// happens to be a receive operation on the other side of the channel at the same time. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, TrySendError}; - /// - /// let (s, r) = bounded(1); - /// - /// assert_eq!(s.try_send(1), Ok(())); - /// assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); - /// - /// drop(r); - /// assert_eq!(s.try_send(3), Err(TrySendError::Disconnected(3))); - /// ``` - pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { - match &self.flavor { - SenderFlavor::Array(chan) => chan.try_send(msg), - SenderFlavor::List(chan) => chan.try_send(msg), - SenderFlavor::Zero(chan) => chan.try_send(msg), - } - } - - /// Blocks the current thread until a message is sent or the channel is disconnected. - /// - /// If the channel is full and not disconnected, this call will block until the send operation - /// can proceed. If the channel becomes disconnected, this call will wake up and return an - /// error. The returned error contains the original message. - /// - /// If called on a zero-capacity channel, this method will wait for a receive operation to - /// appear on the other side of the channel. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{bounded, SendError}; - /// - /// let (s, r) = bounded(1); - /// assert_eq!(s.send(1), Ok(())); - /// - /// thread::spawn(move || { - /// assert_eq!(r.recv(), Ok(1)); - /// thread::sleep(Duration::from_secs(1)); - /// drop(r); - /// }); - /// - /// assert_eq!(s.send(2), Ok(())); - /// assert_eq!(s.send(3), Err(SendError(3))); - /// ``` - pub fn send(&self, msg: T) -> Result<(), SendError> { - match &self.flavor { - SenderFlavor::Array(chan) => chan.send(msg, None), - SenderFlavor::List(chan) => chan.send(msg, None), - SenderFlavor::Zero(chan) => chan.send(msg, None), - } - .map_err(|err| match err { - SendTimeoutError::Disconnected(msg) => SendError(msg), - SendTimeoutError::Timeout(_) => unreachable!(), - }) - } - - /// Waits for a message to be sent into the channel, but only for a limited time. - /// - /// If the channel is full and not disconnected, this call will block until the send operation - /// can proceed or the operation times out. If the channel becomes disconnected, this call will - /// wake up and return an error. The returned error contains the original message. - /// - /// If called on a zero-capacity channel, this method will wait for a receive operation to - /// appear on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{bounded, SendTimeoutError}; - /// - /// let (s, r) = bounded(0); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// assert_eq!(r.recv(), Ok(2)); - /// drop(r); - /// }); - /// - /// assert_eq!( - /// s.send_timeout(1, Duration::from_millis(500)), - /// Err(SendTimeoutError::Timeout(1)), - /// ); - /// assert_eq!( - /// s.send_timeout(2, Duration::from_secs(1)), - /// Ok(()), - /// ); - /// assert_eq!( - /// s.send_timeout(3, Duration::from_millis(500)), - /// Err(SendTimeoutError::Disconnected(3)), - /// ); - /// ``` - pub fn send_timeout(&self, msg: T, timeout: Duration) -> Result<(), SendTimeoutError> { - let deadline = Instant::now() + timeout; - - match &self.flavor { - SenderFlavor::Array(chan) => chan.send(msg, Some(deadline)), - SenderFlavor::List(chan) => chan.send(msg, Some(deadline)), - SenderFlavor::Zero(chan) => chan.send(msg, Some(deadline)), - } - } - - /// Returns `true` if the channel is empty. - /// - /// Note: Zero-capacity channels are always empty. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// assert!(s.is_empty()); - /// - /// s.send(0).unwrap(); - /// assert!(!s.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.is_empty(), - SenderFlavor::List(chan) => chan.is_empty(), - SenderFlavor::Zero(chan) => chan.is_empty(), - } - } - - /// Returns `true` if the channel is full. - /// - /// Note: Zero-capacity channels are always full. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::bounded; - /// - /// let (s, r) = bounded(1); - /// - /// assert!(!s.is_full()); - /// s.send(0).unwrap(); - /// assert!(s.is_full()); - /// ``` - pub fn is_full(&self) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.is_full(), - SenderFlavor::List(chan) => chan.is_full(), - SenderFlavor::Zero(chan) => chan.is_full(), - } - } - - /// Returns the number of messages in the channel. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// assert_eq!(s.len(), 0); - /// - /// s.send(1).unwrap(); - /// s.send(2).unwrap(); - /// assert_eq!(s.len(), 2); - /// ``` - pub fn len(&self) -> usize { - match &self.flavor { - SenderFlavor::Array(chan) => chan.len(), - SenderFlavor::List(chan) => chan.len(), - SenderFlavor::Zero(chan) => chan.len(), - } - } - - /// If the channel is bounded, returns its capacity. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, unbounded}; - /// - /// let (s, _) = unbounded::(); - /// assert_eq!(s.capacity(), None); - /// - /// let (s, _) = bounded::(5); - /// assert_eq!(s.capacity(), Some(5)); - /// - /// let (s, _) = bounded::(0); - /// assert_eq!(s.capacity(), Some(0)); - /// ``` - pub fn capacity(&self) -> Option { - match &self.flavor { - SenderFlavor::Array(chan) => chan.capacity(), - SenderFlavor::List(chan) => chan.capacity(), - SenderFlavor::Zero(chan) => chan.capacity(), - } - } - - /// Returns `true` if senders belong to the same channel. 
- /// - /// # Examples - /// - /// ```rust - /// use crossbeam_channel::unbounded; - /// - /// let (s, _) = unbounded::(); - /// - /// let s2 = s.clone(); - /// assert!(s.same_channel(&s2)); - /// - /// let (s3, _) = unbounded(); - /// assert!(!s.same_channel(&s3)); - /// ``` - pub fn same_channel(&self, other: &Sender) -> bool { - match (&self.flavor, &other.flavor) { - (SenderFlavor::Array(ref a), SenderFlavor::Array(ref b)) => a == b, - (SenderFlavor::List(ref a), SenderFlavor::List(ref b)) => a == b, - (SenderFlavor::Zero(ref a), SenderFlavor::Zero(ref b)) => a == b, - _ => false, - } - } -} - -impl Drop for Sender { - fn drop(&mut self) { - unsafe { - match &self.flavor { - SenderFlavor::Array(chan) => chan.release(|c| c.disconnect()), - SenderFlavor::List(chan) => chan.release(|c| c.disconnect()), - SenderFlavor::Zero(chan) => chan.release(|c| c.disconnect()), - } - } - } -} - -impl Clone for Sender { - fn clone(&self) -> Self { - let flavor = match &self.flavor { - SenderFlavor::Array(chan) => SenderFlavor::Array(chan.acquire()), - SenderFlavor::List(chan) => SenderFlavor::List(chan.acquire()), - SenderFlavor::Zero(chan) => SenderFlavor::Zero(chan.acquire()), - }; - - Sender { flavor } - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("Sender { .. }") - } -} - -/// The receiving side of a channel. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// s.send(1); -/// thread::sleep(Duration::from_secs(1)); -/// s.send(2); -/// }); -/// -/// assert_eq!(r.recv(), Ok(1)); // Received immediately. -/// assert_eq!(r.recv(), Ok(2)); // Received after 1 second. -/// ``` -pub struct Receiver { - flavor: ReceiverFlavor, -} - -/// Receiver flavors. -enum ReceiverFlavor { - /// Bounded channel based on a preallocated array. - Array(counter::Receiver>), - - /// Unbounded channel implemented as a linked list. - List(counter::Receiver>), - - /// Zero-capacity channel. - Zero(counter::Receiver>), - - /// The after flavor. - After(Arc), - - /// The tick flavor. - Tick(Arc), - - /// The never flavor. - Never(flavors::never::Channel), -} - -unsafe impl Send for Receiver {} -unsafe impl Sync for Receiver {} - -impl UnwindSafe for Receiver {} -impl RefUnwindSafe for Receiver {} - -impl Receiver { - /// Attempts to receive a message from the channel without blocking. - /// - /// This method will either receive a message from the channel immediately or return an error - /// if the channel is empty. - /// - /// If called on a zero-capacity channel, this method will receive a message only if there - /// happens to be a send operation on the other side of the channel at the same time. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, TryRecvError}; - /// - /// let (s, r) = unbounded(); - /// assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - /// - /// s.send(5).unwrap(); - /// drop(s); - /// - /// assert_eq!(r.try_recv(), Ok(5)); - /// assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - /// ``` - pub fn try_recv(&self) -> Result { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.try_recv(), - ReceiverFlavor::List(chan) => chan.try_recv(), - ReceiverFlavor::Zero(chan) => chan.try_recv(), - ReceiverFlavor::After(chan) => { - let msg = chan.try_recv(); - unsafe { - mem::transmute_copy::, Result>( - &msg, - ) - } - } - ReceiverFlavor::Tick(chan) => { - let msg = chan.try_recv(); - unsafe { - mem::transmute_copy::, Result>( - &msg, - ) - } - } - ReceiverFlavor::Never(chan) => chan.try_recv(), - } - } - - /// Blocks the current thread until a message is received or the channel is empty and - /// disconnected. - /// - /// If the channel is empty and not disconnected, this call will block until the receive - /// operation can proceed. If the channel is empty and becomes disconnected, this call will - /// wake up and return an error. - /// - /// If called on a zero-capacity channel, this method will wait for a send operation to appear - /// on the other side of the channel. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, RecvError}; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s.send(5).unwrap(); - /// drop(s); - /// }); - /// - /// assert_eq!(r.recv(), Ok(5)); - /// assert_eq!(r.recv(), Err(RecvError)); - /// ``` - pub fn recv(&self) -> Result { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.recv(None), - ReceiverFlavor::List(chan) => chan.recv(None), - ReceiverFlavor::Zero(chan) => chan.recv(None), - ReceiverFlavor::After(chan) => { - let msg = chan.recv(None); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Tick(chan) => { - let msg = chan.recv(None); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Never(chan) => chan.recv(None), - } - .map_err(|_| RecvError) - } - - /// Waits for a message to be received from the channel, but only for a limited time. - /// - /// If the channel is empty and not disconnected, this call will block until the receive - /// operation can proceed or the operation times out. If the channel is empty and becomes - /// disconnected, this call will wake up and return an error. - /// - /// If called on a zero-capacity channel, this method will wait for a send operation to appear - /// on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, RecvTimeoutError}; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s.send(5).unwrap(); - /// drop(s); - /// }); - /// - /// assert_eq!( - /// r.recv_timeout(Duration::from_millis(500)), - /// Err(RecvTimeoutError::Timeout), - /// ); - /// assert_eq!( - /// r.recv_timeout(Duration::from_secs(1)), - /// Ok(5), - /// ); - /// assert_eq!( - /// r.recv_timeout(Duration::from_secs(1)), - /// Err(RecvTimeoutError::Disconnected), - /// ); - /// ``` - pub fn recv_timeout(&self, timeout: Duration) -> Result { - let deadline = Instant::now() + timeout; - - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.recv(Some(deadline)), - ReceiverFlavor::List(chan) => chan.recv(Some(deadline)), - ReceiverFlavor::Zero(chan) => chan.recv(Some(deadline)), - ReceiverFlavor::After(chan) => { - let msg = chan.recv(Some(deadline)); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Tick(chan) => { - let msg = chan.recv(Some(deadline)); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Never(chan) => chan.recv(Some(deadline)), - } - } - - /// Returns `true` if the channel is empty. - /// - /// Note: Zero-capacity channels are always empty. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// - /// assert!(r.is_empty()); - /// s.send(0).unwrap(); - /// assert!(!r.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.is_empty(), - ReceiverFlavor::List(chan) => chan.is_empty(), - ReceiverFlavor::Zero(chan) => chan.is_empty(), - ReceiverFlavor::After(chan) => chan.is_empty(), - ReceiverFlavor::Tick(chan) => chan.is_empty(), - ReceiverFlavor::Never(chan) => chan.is_empty(), - } - } - - /// Returns `true` if the channel is full. - /// - /// Note: Zero-capacity channels are always full. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::bounded; - /// - /// let (s, r) = bounded(1); - /// - /// assert!(!r.is_full()); - /// s.send(0).unwrap(); - /// assert!(r.is_full()); - /// ``` - pub fn is_full(&self) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.is_full(), - ReceiverFlavor::List(chan) => chan.is_full(), - ReceiverFlavor::Zero(chan) => chan.is_full(), - ReceiverFlavor::After(chan) => chan.is_full(), - ReceiverFlavor::Tick(chan) => chan.is_full(), - ReceiverFlavor::Never(chan) => chan.is_full(), - } - } - - /// Returns the number of messages in the channel. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// assert_eq!(r.len(), 0); - /// - /// s.send(1).unwrap(); - /// s.send(2).unwrap(); - /// assert_eq!(r.len(), 2); - /// ``` - pub fn len(&self) -> usize { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.len(), - ReceiverFlavor::List(chan) => chan.len(), - ReceiverFlavor::Zero(chan) => chan.len(), - ReceiverFlavor::After(chan) => chan.len(), - ReceiverFlavor::Tick(chan) => chan.len(), - ReceiverFlavor::Never(chan) => chan.len(), - } - } - - /// If the channel is bounded, returns its capacity. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, unbounded}; - /// - /// let (_, r) = unbounded::(); - /// assert_eq!(r.capacity(), None); - /// - /// let (_, r) = bounded::(5); - /// assert_eq!(r.capacity(), Some(5)); - /// - /// let (_, r) = bounded::(0); - /// assert_eq!(r.capacity(), Some(0)); - /// ``` - pub fn capacity(&self) -> Option { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.capacity(), - ReceiverFlavor::List(chan) => chan.capacity(), - ReceiverFlavor::Zero(chan) => chan.capacity(), - ReceiverFlavor::After(chan) => chan.capacity(), - ReceiverFlavor::Tick(chan) => chan.capacity(), - ReceiverFlavor::Never(chan) => chan.capacity(), - } - } - - /// A blocking iterator over messages in the channel. - /// - /// Each call to [`next`] blocks waiting for the next message and then returns it. However, if - /// the channel becomes empty and disconnected, it returns [`None`] without blocking. - /// - /// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next - /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// s.send(1).unwrap(); - /// s.send(2).unwrap(); - /// s.send(3).unwrap(); - /// drop(s); // Disconnect the channel. - /// }); - /// - /// // Collect all messages from the channel. - /// // Note that the call to `collect` blocks until the sender is dropped. - /// let v: Vec<_> = r.iter().collect(); - /// - /// assert_eq!(v, [1, 2, 3]); - /// ``` - pub fn iter(&self) -> Iter { - Iter { receiver: self } - } - - /// A non-blocking iterator over messages in the channel. - /// - /// Each call to [`next`] returns a message if there is one ready to be received. The iterator - /// never blocks waiting for the next message. - /// - /// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded::(); - /// - /// thread::spawn(move || { - /// s.send(1).unwrap(); - /// thread::sleep(Duration::from_secs(1)); - /// s.send(2).unwrap(); - /// thread::sleep(Duration::from_secs(2)); - /// s.send(3).unwrap(); - /// }); - /// - /// thread::sleep(Duration::from_secs(2)); - /// - /// // Collect all messages from the channel without blocking. - /// // The third message hasn't been sent yet so we'll collect only the first two. - /// let v: Vec<_> = r.try_iter().collect(); - /// - /// assert_eq!(v, [1, 2]); - /// ``` - pub fn try_iter(&self) -> TryIter { - TryIter { receiver: self } - } - - /// Returns `true` if receivers belong to the same channel. 
- /// - /// # Examples - /// - /// ```rust - /// use crossbeam_channel::unbounded; - /// - /// let (_, r) = unbounded::(); - /// - /// let r2 = r.clone(); - /// assert!(r.same_channel(&r2)); - /// - /// let (_, r3) = unbounded(); - /// assert!(!r.same_channel(&r3)); - /// ``` - pub fn same_channel(&self, other: &Receiver) -> bool { - match (&self.flavor, &other.flavor) { - (ReceiverFlavor::Array(a), ReceiverFlavor::Array(b)) => a == b, - (ReceiverFlavor::List(a), ReceiverFlavor::List(b)) => a == b, - (ReceiverFlavor::Zero(a), ReceiverFlavor::Zero(b)) => a == b, - (ReceiverFlavor::After(a), ReceiverFlavor::After(b)) => Arc::ptr_eq(a, b), - (ReceiverFlavor::Tick(a), ReceiverFlavor::Tick(b)) => Arc::ptr_eq(a, b), - (ReceiverFlavor::Never(_), ReceiverFlavor::Never(_)) => true, - _ => false, - } - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - unsafe { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.release(|c| c.disconnect()), - ReceiverFlavor::List(chan) => chan.release(|c| c.disconnect()), - ReceiverFlavor::Zero(chan) => chan.release(|c| c.disconnect()), - ReceiverFlavor::After(_) => {} - ReceiverFlavor::Tick(_) => {} - ReceiverFlavor::Never(_) => {} - } - } - } -} - -impl Clone for Receiver { - fn clone(&self) -> Self { - let flavor = match &self.flavor { - ReceiverFlavor::Array(chan) => ReceiverFlavor::Array(chan.acquire()), - ReceiverFlavor::List(chan) => ReceiverFlavor::List(chan.acquire()), - ReceiverFlavor::Zero(chan) => ReceiverFlavor::Zero(chan.acquire()), - ReceiverFlavor::After(chan) => ReceiverFlavor::After(chan.clone()), - ReceiverFlavor::Tick(chan) => ReceiverFlavor::Tick(chan.clone()), - ReceiverFlavor::Never(_) => ReceiverFlavor::Never(flavors::never::Channel::new()), - }; - - Receiver { flavor } - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("Receiver { .. }") - } -} - -impl<'a, T> IntoIterator for &'a Receiver { - type Item = T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for Receiver { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { receiver: self } - } -} - -/// A blocking iterator over messages in a channel. -/// -/// Each call to [`next`] blocks waiting for the next message and then returns it. However, if the -/// channel becomes empty and disconnected, it returns [`None`] without blocking. -/// -/// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next -/// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// s.send(1).unwrap(); -/// s.send(2).unwrap(); -/// s.send(3).unwrap(); -/// drop(s); // Disconnect the channel. -/// }); -/// -/// // Collect all messages from the channel. -/// // Note that the call to `collect` blocks until the sender is dropped. -/// let v: Vec<_> = r.iter().collect(); -/// -/// assert_eq!(v, [1, 2, 3]); -/// ``` -pub struct Iter<'a, T: 'a> { - receiver: &'a Receiver, -} - -impl<'a, T> FusedIterator for Iter<'a, T> {} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.receiver.recv().ok() - } -} - -impl<'a, T> fmt::Debug for Iter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("Iter { .. 
}") - } -} - -/// A non-blocking iterator over messages in a channel. -/// -/// Each call to [`next`] returns a message if there is one ready to be received. The iterator -/// never blocks waiting for the next message. -/// -/// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded::(); -/// -/// thread::spawn(move || { -/// s.send(1).unwrap(); -/// thread::sleep(Duration::from_secs(1)); -/// s.send(2).unwrap(); -/// thread::sleep(Duration::from_secs(2)); -/// s.send(3).unwrap(); -/// }); -/// -/// thread::sleep(Duration::from_secs(2)); -/// -/// // Collect all messages from the channel without blocking. -/// // The third message hasn't been sent yet so we'll collect only the first two. -/// let v: Vec<_> = r.try_iter().collect(); -/// -/// assert_eq!(v, [1, 2]); -/// ``` -pub struct TryIter<'a, T: 'a> { - receiver: &'a Receiver, -} - -impl<'a, T> Iterator for TryIter<'a, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.receiver.try_recv().ok() - } -} - -impl<'a, T> fmt::Debug for TryIter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("TryIter { .. }") - } -} - -/// A blocking iterator over messages in a channel. -/// -/// Each call to [`next`] blocks waiting for the next message and then returns it. However, if the -/// channel becomes empty and disconnected, it returns [`None`] without blocking. -/// -/// [`next`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#tymethod.next -/// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// s.send(1).unwrap(); -/// s.send(2).unwrap(); -/// s.send(3).unwrap(); -/// drop(s); // Disconnect the channel. -/// }); -/// -/// // Collect all messages from the channel. -/// // Note that the call to `collect` blocks until the sender is dropped. -/// let v: Vec<_> = r.into_iter().collect(); -/// -/// assert_eq!(v, [1, 2, 3]); -/// ``` -pub struct IntoIter { - receiver: Receiver, -} - -impl FusedIterator for IntoIter {} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.receiver.recv().ok() - } -} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("IntoIter { .. 
}") - } -} - -impl SelectHandle for Sender { - fn try_select(&self, token: &mut Token) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().try_select(token), - SenderFlavor::List(chan) => chan.sender().try_select(token), - SenderFlavor::Zero(chan) => chan.sender().try_select(token), - } - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().register(oper, cx), - SenderFlavor::List(chan) => chan.sender().register(oper, cx), - SenderFlavor::Zero(chan) => chan.sender().register(oper, cx), - } - } - - fn unregister(&self, oper: Operation) { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().unregister(oper), - SenderFlavor::List(chan) => chan.sender().unregister(oper), - SenderFlavor::Zero(chan) => chan.sender().unregister(oper), - } - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().accept(token, cx), - SenderFlavor::List(chan) => chan.sender().accept(token, cx), - SenderFlavor::Zero(chan) => chan.sender().accept(token, cx), - } - } - - fn is_ready(&self) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().is_ready(), - SenderFlavor::List(chan) => chan.sender().is_ready(), - SenderFlavor::Zero(chan) => chan.sender().is_ready(), - } - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().watch(oper, cx), - SenderFlavor::List(chan) => chan.sender().watch(oper, cx), - SenderFlavor::Zero(chan) => chan.sender().watch(oper, cx), - } - } - - fn unwatch(&self, oper: Operation) { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().unwatch(oper), - SenderFlavor::List(chan) => chan.sender().unwatch(oper), - SenderFlavor::Zero(chan) => chan.sender().unwatch(oper), - } - } -} - -impl SelectHandle for Receiver { - fn try_select(&self, token: &mut Token) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().try_select(token), - ReceiverFlavor::List(chan) => chan.receiver().try_select(token), - ReceiverFlavor::Zero(chan) => chan.receiver().try_select(token), - ReceiverFlavor::After(chan) => chan.try_select(token), - ReceiverFlavor::Tick(chan) => chan.try_select(token), - ReceiverFlavor::Never(chan) => chan.try_select(token), - } - } - - fn deadline(&self) -> Option { - match &self.flavor { - ReceiverFlavor::Array(_) => None, - ReceiverFlavor::List(_) => None, - ReceiverFlavor::Zero(_) => None, - ReceiverFlavor::After(chan) => chan.deadline(), - ReceiverFlavor::Tick(chan) => chan.deadline(), - ReceiverFlavor::Never(chan) => chan.deadline(), - } - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().register(oper, cx), - ReceiverFlavor::List(chan) => chan.receiver().register(oper, cx), - ReceiverFlavor::Zero(chan) => chan.receiver().register(oper, cx), - ReceiverFlavor::After(chan) => chan.register(oper, cx), - ReceiverFlavor::Tick(chan) => chan.register(oper, cx), - ReceiverFlavor::Never(chan) => chan.register(oper, cx), - } - } - - fn unregister(&self, oper: Operation) { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().unregister(oper), - ReceiverFlavor::List(chan) => chan.receiver().unregister(oper), - ReceiverFlavor::Zero(chan) => chan.receiver().unregister(oper), - ReceiverFlavor::After(chan) => 
chan.unregister(oper), - ReceiverFlavor::Tick(chan) => chan.unregister(oper), - ReceiverFlavor::Never(chan) => chan.unregister(oper), - } - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().accept(token, cx), - ReceiverFlavor::List(chan) => chan.receiver().accept(token, cx), - ReceiverFlavor::Zero(chan) => chan.receiver().accept(token, cx), - ReceiverFlavor::After(chan) => chan.accept(token, cx), - ReceiverFlavor::Tick(chan) => chan.accept(token, cx), - ReceiverFlavor::Never(chan) => chan.accept(token, cx), - } - } - - fn is_ready(&self) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().is_ready(), - ReceiverFlavor::List(chan) => chan.receiver().is_ready(), - ReceiverFlavor::Zero(chan) => chan.receiver().is_ready(), - ReceiverFlavor::After(chan) => chan.is_ready(), - ReceiverFlavor::Tick(chan) => chan.is_ready(), - ReceiverFlavor::Never(chan) => chan.is_ready(), - } - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().watch(oper, cx), - ReceiverFlavor::List(chan) => chan.receiver().watch(oper, cx), - ReceiverFlavor::Zero(chan) => chan.receiver().watch(oper, cx), - ReceiverFlavor::After(chan) => chan.watch(oper, cx), - ReceiverFlavor::Tick(chan) => chan.watch(oper, cx), - ReceiverFlavor::Never(chan) => chan.watch(oper, cx), - } - } - - fn unwatch(&self, oper: Operation) { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().unwatch(oper), - ReceiverFlavor::List(chan) => chan.receiver().unwatch(oper), - ReceiverFlavor::Zero(chan) => chan.receiver().unwatch(oper), - ReceiverFlavor::After(chan) => chan.unwatch(oper), - ReceiverFlavor::Tick(chan) => chan.unwatch(oper), - ReceiverFlavor::Never(chan) => chan.unwatch(oper), - } - } -} - -/// Writes a message into the channel. -pub unsafe fn write(s: &Sender, token: &mut Token, msg: T) -> Result<(), T> { - match &s.flavor { - SenderFlavor::Array(chan) => chan.write(token, msg), - SenderFlavor::List(chan) => chan.write(token, msg), - SenderFlavor::Zero(chan) => chan.write(token, msg), - } -} - -/// Reads a message from the channel. -pub unsafe fn read(r: &Receiver, token: &mut Token) -> Result { - match &r.flavor { - ReceiverFlavor::Array(chan) => chan.read(token), - ReceiverFlavor::List(chan) => chan.read(token), - ReceiverFlavor::Zero(chan) => chan.read(token), - ReceiverFlavor::After(chan) => { - mem::transmute_copy::, Result>(&chan.read(token)) - } - ReceiverFlavor::Tick(chan) => { - mem::transmute_copy::, Result>(&chan.read(token)) - } - ReceiverFlavor::Never(chan) => chan.read(token), - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/context.rs cargo-0.47.0/vendor/crossbeam-channel/src/context.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/context.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/context.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -//! Thread-local context used in select. - -use std::cell::Cell; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::thread::{self, Thread, ThreadId}; -use std::time::Instant; - -use crossbeam_utils::Backoff; - -use select::Selected; - -/// Thread-local context used in select. -#[derive(Debug, Clone)] -pub struct Context { - inner: Arc, -} - -/// Inner representation of `Context`. -#[derive(Debug)] -struct Inner { - /// Selected operation. 
- select: AtomicUsize, - - /// A slot into which another thread may store a pointer to its `Packet`. - packet: AtomicUsize, - - /// Thread handle. - thread: Thread, - - /// Thread id. - thread_id: ThreadId, -} - -impl Context { - /// Creates a new context for the duration of the closure. - #[inline] - pub fn with(f: F) -> R - where - F: FnOnce(&Context) -> R, - { - thread_local! { - /// Cached thread-local context. - static CONTEXT: Cell> = Cell::new(Some(Context::new())); - } - - let mut f = Some(f); - let mut f = move |cx: &Context| -> R { - let f = f.take().unwrap(); - f(cx) - }; - - CONTEXT - .try_with(|cell| match cell.take() { - None => f(&Context::new()), - Some(cx) => { - cx.reset(); - let res = f(&cx); - cell.set(Some(cx)); - res - } - }) - .unwrap_or_else(|_| f(&Context::new())) - } - - /// Creates a new `Context`. - #[cold] - fn new() -> Context { - Context { - inner: Arc::new(Inner { - select: AtomicUsize::new(Selected::Waiting.into()), - packet: AtomicUsize::new(0), - thread: thread::current(), - thread_id: thread::current().id(), - }), - } - } - - /// Resets `select` and `packet`. - #[inline] - fn reset(&self) { - self.inner - .select - .store(Selected::Waiting.into(), Ordering::Release); - self.inner.packet.store(0, Ordering::Release); - } - - /// Attempts to select an operation. - /// - /// On failure, the previously selected operation is returned. - #[inline] - pub fn try_select(&self, select: Selected) -> Result<(), Selected> { - self.inner - .select - .compare_exchange( - Selected::Waiting.into(), - select.into(), - Ordering::AcqRel, - Ordering::Acquire, - ) - .map(|_| ()) - .map_err(|e| e.into()) - } - - /// Returns the selected operation. - #[inline] - pub fn selected(&self) -> Selected { - Selected::from(self.inner.select.load(Ordering::Acquire)) - } - - /// Stores a packet. - /// - /// This method must be called after `try_select` succeeds and there is a packet to provide. - #[inline] - pub fn store_packet(&self, packet: usize) { - if packet != 0 { - self.inner.packet.store(packet, Ordering::Release); - } - } - - /// Waits until a packet is provided and returns it. - #[inline] - pub fn wait_packet(&self) -> usize { - let backoff = Backoff::new(); - loop { - let packet = self.inner.packet.load(Ordering::Acquire); - if packet != 0 { - return packet; - } - backoff.snooze(); - } - } - - /// Waits until an operation is selected and returns it. - /// - /// If the deadline is reached, `Selected::Aborted` will be selected. - #[inline] - pub fn wait_until(&self, deadline: Option) -> Selected { - // Spin for a short time, waiting until an operation is selected. - let backoff = Backoff::new(); - loop { - let sel = Selected::from(self.inner.select.load(Ordering::Acquire)); - if sel != Selected::Waiting { - return sel; - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - loop { - // Check whether an operation has been selected. - let sel = Selected::from(self.inner.select.load(Ordering::Acquire)); - if sel != Selected::Waiting { - return sel; - } - - // If there's a deadline, park the current thread until the deadline is reached. - if let Some(end) = deadline { - let now = Instant::now(); - - if now < end { - thread::park_timeout(end - now); - } else { - // The deadline has been reached. Try aborting select. - return match self.try_select(Selected::Aborted) { - Ok(()) => Selected::Aborted, - Err(s) => s, - }; - } - } else { - thread::park(); - } - } - } - - /// Unparks the thread this context belongs to. 
- #[inline] - pub fn unpark(&self) { - self.inner.thread.unpark(); - } - - /// Returns the id of the thread this context belongs to. - #[inline] - pub fn thread_id(&self) -> ThreadId { - self.inner.thread_id - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/counter.rs cargo-0.47.0/vendor/crossbeam-channel/src/counter.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/counter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/counter.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -//! Reference counter for channels. - -use std::isize; -use std::ops; -use std::process; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; - -/// Reference counter internals. -struct Counter { - /// The number of senders associated with the channel. - senders: AtomicUsize, - - /// The number of receivers associated with the channel. - receivers: AtomicUsize, - - /// Set to `true` if the last sender or the last receiver reference deallocates the channel. - destroy: AtomicBool, - - /// The internal channel. - chan: C, -} - -/// Wraps a channel into the reference counter. -pub fn new(chan: C) -> (Sender, Receiver) { - let counter = Box::into_raw(Box::new(Counter { - senders: AtomicUsize::new(1), - receivers: AtomicUsize::new(1), - destroy: AtomicBool::new(false), - chan, - })); - let s = Sender { counter }; - let r = Receiver { counter }; - (s, r) -} - -/// The sending side. -pub struct Sender { - counter: *mut Counter, -} - -impl Sender { - /// Returns the internal `Counter`. - fn counter(&self) -> &Counter { - unsafe { &*self.counter } - } - - /// Acquires another sender reference. - pub fn acquire(&self) -> Sender { - let count = self.counter().senders.fetch_add(1, Ordering::Relaxed); - - // Cloning senders and calling `mem::forget` on the clones could potentially overflow the - // counter. It's very difficult to recover sensibly from such degenerate scenarios so we - // just abort when the count becomes very large. - if count > isize::MAX as usize { - process::abort(); - } - - Sender { - counter: self.counter, - } - } - - /// Releases the sender reference. - /// - /// Function `disconnect` will be called if this is the last sender reference. - pub unsafe fn release bool>(&self, disconnect: F) { - if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 { - disconnect(&self.counter().chan); - - if self.counter().destroy.swap(true, Ordering::AcqRel) { - drop(Box::from_raw(self.counter)); - } - } - } -} - -impl ops::Deref for Sender { - type Target = C; - - fn deref(&self) -> &C { - &self.counter().chan - } -} - -impl PartialEq for Sender { - fn eq(&self, other: &Sender) -> bool { - self.counter == other.counter - } -} - -/// The receiving side. -pub struct Receiver { - counter: *mut Counter, -} - -impl Receiver { - /// Returns the internal `Counter`. - fn counter(&self) -> &Counter { - unsafe { &*self.counter } - } - - /// Acquires another receiver reference. - pub fn acquire(&self) -> Receiver { - let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed); - - // Cloning receivers and calling `mem::forget` on the clones could potentially overflow the - // counter. It's very difficult to recover sensibly from such degenerate scenarios so we - // just abort when the count becomes very large. - if count > isize::MAX as usize { - process::abort(); - } - - Receiver { - counter: self.counter, - } - } - - /// Releases the receiver reference. - /// - /// Function `disconnect` will be called if this is the last receiver reference. 
- pub unsafe fn release bool>(&self, disconnect: F) { - if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 { - disconnect(&self.counter().chan); - - if self.counter().destroy.swap(true, Ordering::AcqRel) { - drop(Box::from_raw(self.counter)); - } - } - } -} - -impl ops::Deref for Receiver { - type Target = C; - - fn deref(&self) -> &C { - &self.counter().chan - } -} - -impl PartialEq for Receiver { - fn eq(&self, other: &Receiver) -> bool { - self.counter == other.counter - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/err.rs cargo-0.47.0/vendor/crossbeam-channel/src/err.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/err.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/err.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,451 +0,0 @@ -use std::error; -use std::fmt; - -/// An error returned from the [`send`] method. -/// -/// The message could not be sent because the channel is disconnected. -/// -/// The error contains the message so it can be recovered. -/// -/// [`send`]: struct.Sender.html#method.send -#[derive(PartialEq, Eq, Clone, Copy)] -pub struct SendError(pub T); - -/// An error returned from the [`try_send`] method. -/// -/// The error contains the message being sent so it can be recovered. -/// -/// [`try_send`]: struct.Sender.html#method.try_send -#[derive(PartialEq, Eq, Clone, Copy)] -pub enum TrySendError { - /// The message could not be sent because the channel is full. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no receiver - /// available to receive the message at the time. - Full(T), - - /// The message could not be sent because the channel is disconnected. - Disconnected(T), -} - -/// An error returned from the [`send_timeout`] method. -/// -/// The error contains the message being sent so it can be recovered. -/// -/// [`send_timeout`]: struct.Sender.html#method.send_timeout -#[derive(PartialEq, Eq, Clone, Copy)] -pub enum SendTimeoutError { - /// The message could not be sent because the channel is full and the operation timed out. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no receiver - /// available to receive the message and the operation timed out. - Timeout(T), - - /// The message could not be sent because the channel is disconnected. - Disconnected(T), -} - -/// An error returned from the [`recv`] method. -/// -/// A message could not be received because the channel is empty and disconnected. -/// -/// [`recv`]: struct.Receiver.html#method.recv -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct RecvError; - -/// An error returned from the [`try_recv`] method. -/// -/// [`try_recv`]: struct.Receiver.html#method.recv -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum TryRecvError { - /// A message could not be received because the channel is empty. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no sender - /// available to send a message at the time. - Empty, - - /// The message could not be received because the channel is empty and disconnected. - Disconnected, -} - -/// An error returned from the [`recv_timeout`] method. -/// -/// [`recv_timeout`]: struct.Receiver.html#method.recv_timeout -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum RecvTimeoutError { - /// A message could not be received because the channel is empty and the operation timed out. 
- /// - /// If this is a zero-capacity channel, then the error indicates that there was no sender - /// available to send a message and the operation timed out. - Timeout, - - /// The message could not be received because the channel is empty and disconnected. - Disconnected, -} - -/// An error returned from the [`try_select`] method. -/// -/// Failed because none of the channel operations were ready. -/// -/// [`try_select`]: struct.Select.html#method.try_select -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct TrySelectError; - -/// An error returned from the [`select_timeout`] method. -/// -/// Failed because none of the channel operations became ready before the timeout. -/// -/// [`select_timeout`]: struct.Select.html#method.select_timeout -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct SelectTimeoutError; - -/// An error returned from the [`try_ready`] method. -/// -/// Failed because none of the channel operations were ready. -/// -/// [`try_ready`]: struct.Select.html#method.try_ready -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct TryReadyError; - -/// An error returned from the [`ready_timeout`] method. -/// -/// Failed because none of the channel operations became ready before the timeout. -/// -/// [`ready_timeout`]: struct.Select.html#method.ready_timeout -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct ReadyTimeoutError; - -impl fmt::Debug for SendError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "SendError(..)".fmt(f) - } -} - -impl fmt::Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "sending on a disconnected channel".fmt(f) - } -} - -impl error::Error for SendError { - fn description(&self) -> &str { - "sending on a disconnected channel" - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl SendError { - /// Unwraps the message. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// drop(r); - /// - /// if let Err(err) = s.send("foo") { - /// assert_eq!(err.into_inner(), "foo"); - /// } - /// ``` - pub fn into_inner(self) -> T { - self.0 - } -} - -impl fmt::Debug for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - TrySendError::Full(..) => "Full(..)".fmt(f), - TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f), - } - } -} - -impl fmt::Display for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - TrySendError::Full(..) => "sending on a full channel".fmt(f), - TrySendError::Disconnected(..) => "sending on a disconnected channel".fmt(f), - } - } -} - -impl error::Error for TrySendError { - fn description(&self) -> &str { - match *self { - TrySendError::Full(..) => "sending on a full channel", - TrySendError::Disconnected(..) => "sending on a disconnected channel", - } - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl From> for TrySendError { - fn from(err: SendError) -> TrySendError { - match err { - SendError(t) => TrySendError::Disconnected(t), - } - } -} - -impl TrySendError { - /// Unwraps the message. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::bounded; - /// - /// let (s, r) = bounded(0); - /// - /// if let Err(err) = s.try_send("foo") { - /// assert_eq!(err.into_inner(), "foo"); - /// } - /// ``` - pub fn into_inner(self) -> T { - match self { - TrySendError::Full(v) => v, - TrySendError::Disconnected(v) => v, - } - } - - /// Returns `true` if the send operation failed because the channel is full. - pub fn is_full(&self) -> bool { - match self { - TrySendError::Full(_) => true, - _ => false, - } - } - - /// Returns `true` if the send operation failed because the channel is disconnected. - pub fn is_disconnected(&self) -> bool { - match self { - TrySendError::Disconnected(_) => true, - _ => false, - } - } -} - -impl fmt::Debug for SendTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "SendTimeoutError(..)".fmt(f) - } -} - -impl fmt::Display for SendTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f), - SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f), - } - } -} - -impl error::Error for SendTimeoutError { - fn description(&self) -> &str { - "sending on an empty and disconnected channel" - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl From> for SendTimeoutError { - fn from(err: SendError) -> SendTimeoutError { - match err { - SendError(e) => SendTimeoutError::Disconnected(e), - } - } -} - -impl SendTimeoutError { - /// Unwraps the message. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// - /// if let Err(err) = s.send_timeout("foo", Duration::from_secs(1)) { - /// assert_eq!(err.into_inner(), "foo"); - /// } - /// ``` - pub fn into_inner(self) -> T { - match self { - SendTimeoutError::Timeout(v) => v, - SendTimeoutError::Disconnected(v) => v, - } - } - - /// Returns `true` if the send operation timed out. - pub fn is_timeout(&self) -> bool { - match self { - SendTimeoutError::Timeout(_) => true, - _ => false, - } - } - - /// Returns `true` if the send operation failed because the channel is disconnected. 
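For context, the `TrySendError` helpers documented above (`into_inner`, `is_full`, `is_disconnected`) are part of crossbeam-channel's public API; the following is a minimal usage sketch against the 0.4-series crate being dropped here, not part of the patch itself.

```
use crossbeam_channel::{bounded, TrySendError};

fn main() {
    let (s, r) = bounded(1);

    // The first message fits; the second finds the single slot occupied.
    s.try_send("first").unwrap();
    match s.try_send("second") {
        Err(TrySendError::Full(msg)) => assert_eq!(msg, "second"),
        other => panic!("expected Full, got {:?}", other),
    }

    // Once the receiver is gone the failure becomes Disconnected, and the
    // unsent message can still be recovered with into_inner().
    drop(r);
    let err = s.try_send("third").unwrap_err();
    assert!(err.is_disconnected() && !err.is_full());
    assert_eq!(err.into_inner(), "third");
}
```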
- pub fn is_disconnected(&self) -> bool { - match self { - SendTimeoutError::Disconnected(_) => true, - _ => false, - } - } -} - -impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "receiving on an empty and disconnected channel".fmt(f) - } -} - -impl error::Error for RecvError { - fn description(&self) -> &str { - "receiving on an empty and disconnected channel" - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl fmt::Display for TryRecvError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - TryRecvError::Empty => "receiving on an empty channel".fmt(f), - TryRecvError::Disconnected => "receiving on an empty and disconnected channel".fmt(f), - } - } -} - -impl error::Error for TryRecvError { - fn description(&self) -> &str { - match *self { - TryRecvError::Empty => "receiving on an empty channel", - TryRecvError::Disconnected => "receiving on an empty and disconnected channel", - } - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl From for TryRecvError { - fn from(err: RecvError) -> TryRecvError { - match err { - RecvError => TryRecvError::Disconnected, - } - } -} - -impl TryRecvError { - /// Returns `true` if the receive operation failed because the channel is empty. - pub fn is_empty(&self) -> bool { - match self { - TryRecvError::Empty => true, - _ => false, - } - } - - /// Returns `true` if the receive operation failed because the channel is disconnected. - pub fn is_disconnected(&self) -> bool { - match self { - TryRecvError::Disconnected => true, - _ => false, - } - } -} - -impl fmt::Display for RecvTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - RecvTimeoutError::Timeout => "timed out waiting on receive operation".fmt(f), - RecvTimeoutError::Disconnected => "channel is empty and disconnected".fmt(f), - } - } -} - -impl error::Error for RecvTimeoutError { - fn description(&self) -> &str { - match *self { - RecvTimeoutError::Timeout => "timed out waiting on receive operation", - RecvTimeoutError::Disconnected => "channel is empty and disconnected", - } - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl From for RecvTimeoutError { - fn from(err: RecvError) -> RecvTimeoutError { - match err { - RecvError => RecvTimeoutError::Disconnected, - } - } -} - -impl RecvTimeoutError { - /// Returns `true` if the receive operation timed out. - pub fn is_timeout(&self) -> bool { - match self { - RecvTimeoutError::Timeout => true, - _ => false, - } - } - - /// Returns `true` if the receive operation failed because the channel is disconnected. 
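The receive-side errors above draw the same distinction between an empty and a disconnected channel. A short sketch of `try_recv`/`recv_timeout` behaviour, again assuming the vendored 0.4 API:

```
use std::time::Duration;
use crossbeam_channel::{unbounded, RecvTimeoutError, TryRecvError};

fn main() {
    let (s, r) = unbounded::<i32>();

    // Nothing sent yet: the channel is empty, but not disconnected.
    assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
    assert_eq!(
        r.recv_timeout(Duration::from_millis(10)),
        Err(RecvTimeoutError::Timeout)
    );

    // A buffered message is still delivered after the sender is dropped...
    s.send(7).unwrap();
    drop(s);
    assert_eq!(r.recv_timeout(Duration::from_millis(10)), Ok(7));

    // ...and only then does the error become Disconnected.
    assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
}
```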
- pub fn is_disconnected(&self) -> bool { - match self { - RecvTimeoutError::Disconnected => true, - _ => false, - } - } -} - -impl fmt::Display for TrySelectError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "all operations in select would block".fmt(f) - } -} - -impl error::Error for TrySelectError { - fn description(&self) -> &str { - "all operations in select would block" - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} - -impl fmt::Display for SelectTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "timed out waiting on select".fmt(f) - } -} - -impl error::Error for SelectTimeoutError { - fn description(&self) -> &str { - "timed out waiting on select" - } - - fn cause(&self) -> Option<&dyn error::Error> { - None - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/after.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/after.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/after.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/after.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,200 +0,0 @@ -//! Channel that delivers a message after a certain amount of time. -//! -//! Messages cannot be sent into this kind of channel; they are materialized on demand. - -use std::sync::atomic::{AtomicBool, Ordering}; -use std::thread; -use std::time::{Duration, Instant}; - -use context::Context; -use err::{RecvTimeoutError, TryRecvError}; -use select::{Operation, SelectHandle, Token}; -use utils; - -/// Result of a receive operation. -pub type AfterToken = Option; - -/// Channel that delivers a message after a certain amount of time. -pub struct Channel { - /// The instant at which the message will be delivered. - delivery_time: Instant, - - /// `true` if the message has been received. - received: AtomicBool, -} - -impl Channel { - /// Creates a channel that delivers a message after a certain duration of time. - #[inline] - pub fn new(dur: Duration) -> Self { - Channel { - delivery_time: Instant::now() + dur, - received: AtomicBool::new(false), - } - } - - /// Attempts to receive a message without blocking. - #[inline] - pub fn try_recv(&self) -> Result { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - // The message has already been received. - return Err(TryRecvError::Empty); - } - - if Instant::now() < self.delivery_time { - // The message was not delivered yet. - return Err(TryRecvError::Empty); - } - - // Try receiving the message if it is still available. - if !self.received.swap(true, Ordering::SeqCst) { - // Success! Return delivery time as the message. - Ok(self.delivery_time) - } else { - // The message was already received. - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - #[inline] - pub fn recv(&self, deadline: Option) -> Result { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - // The message has already been received. - utils::sleep_until(deadline); - return Err(RecvTimeoutError::Timeout); - } - - // Wait until the message is received or the deadline is reached. - loop { - let now = Instant::now(); - - // Check if we can receive the next message. - if now >= self.delivery_time { - break; - } - - // Check if the deadline has been reached. 
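The `after` flavor whose removal starts above is the crate's one-shot timer channel; it is normally reached through the top-level `after(Duration)` constructor (assumed here to be the public wrapper for this flavor). A minimal sketch:

```
use std::time::{Duration, Instant};
use crossbeam_channel::after;

fn main() {
    let start = Instant::now();

    // `after` returns a Receiver<Instant> that delivers exactly one message
    // once the duration has elapsed; the message is the delivery time itself.
    let timer = after(Duration::from_millis(50));
    let delivered_at = timer.recv().unwrap();

    assert!(delivered_at >= start);
    assert!(start.elapsed() >= Duration::from_millis(50));

    // The single message is gone; further receives report an empty channel.
    assert!(timer.try_recv().is_err());
}
```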
- if let Some(d) = deadline { - if now >= d { - return Err(RecvTimeoutError::Timeout); - } - - thread::sleep(self.delivery_time.min(d) - now); - } else { - thread::sleep(self.delivery_time - now); - } - } - - // Try receiving the message if it is still available. - if !self.received.swap(true, Ordering::SeqCst) { - // Success! Return the message, which is the instant at which it was delivered. - Ok(self.delivery_time) - } else { - // The message was already received. Block forever. - utils::sleep_until(None); - unreachable!() - } - } - - /// Reads a message from the channel. - #[inline] - pub unsafe fn read(&self, token: &mut Token) -> Result { - token.after.ok_or(()) - } - - /// Returns `true` if the channel is empty. - #[inline] - pub fn is_empty(&self) -> bool { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - return true; - } - - // If the delivery time hasn't been reached yet, the channel is empty. - if Instant::now() < self.delivery_time { - return true; - } - - // The delivery time has been reached. The channel is empty only if the message has already - // been received. - self.received.load(Ordering::SeqCst) - } - - /// Returns `true` if the channel is full. - #[inline] - pub fn is_full(&self) -> bool { - !self.is_empty() - } - - /// Returns the number of messages in the channel. - #[inline] - pub fn len(&self) -> usize { - if self.is_empty() { - 0 - } else { - 1 - } - } - - /// Returns the capacity of the channel. - #[inline] - pub fn capacity(&self) -> Option { - Some(1) - } -} - -impl SelectHandle for Channel { - #[inline] - fn try_select(&self, token: &mut Token) -> bool { - match self.try_recv() { - Ok(msg) => { - token.after = Some(msg); - true - } - Err(TryRecvError::Disconnected) => { - token.after = None; - true - } - Err(TryRecvError::Empty) => false, - } - } - - #[inline] - fn deadline(&self) -> Option { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - None - } else { - Some(self.delivery_time) - } - } - - #[inline] - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unregister(&self, _oper: Operation) {} - - #[inline] - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - #[inline] - fn is_ready(&self) -> bool { - !self.is_empty() - } - - #[inline] - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unwatch(&self, _oper: Operation) {} -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/array.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/array.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/array.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/array.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,639 +0,0 @@ -//! Bounded channel based on a preallocated array. -//! -//! This flavor has a fixed, positive capacity. -//! -//! The implementation is based on Dmitry Vyukov's bounded MPMC queue. -//! -//! Source: -//! - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue -//! - https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub -//! -//! Copyright & License: -//! - Copyright (c) 2010-2011 Dmitry Vyukov -//! - Simplified BSD License and Apache License, Version 2.0 -//! 
- http://www.1024cores.net/home/code-license - -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::mem; -use std::ptr; -use std::sync::atomic::{self, AtomicUsize, Ordering}; -use std::time::Instant; - -use crossbeam_utils::{Backoff, CachePadded}; - -use maybe_uninit::MaybeUninit; - -use context::Context; -use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; -use select::{Operation, SelectHandle, Selected, Token}; -use waker::SyncWaker; - -/// A slot in a channel. -struct Slot { - /// The current stamp. - stamp: AtomicUsize, - - /// The message in this slot. - msg: UnsafeCell>, -} - -/// The token type for the array flavor. -#[derive(Debug)] -pub struct ArrayToken { - /// Slot to read from or write to. - slot: *const u8, - - /// Stamp to store into the slot after reading or writing. - stamp: usize, -} - -impl Default for ArrayToken { - #[inline] - fn default() -> Self { - ArrayToken { - slot: ptr::null(), - stamp: 0, - } - } -} - -/// Bounded channel based on a preallocated array. -pub struct Channel { - /// The head of the channel. - /// - /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but - /// packed into a single `usize`. The lower bits represent the index, while the upper bits - /// represent the lap. The mark bit in the head is always zero. - /// - /// Messages are popped from the head of the channel. - head: CachePadded, - - /// The tail of the channel. - /// - /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but - /// packed into a single `usize`. The lower bits represent the index, while the upper bits - /// represent the lap. The mark bit indicates that the channel is disconnected. - /// - /// Messages are pushed into the tail of the channel. - tail: CachePadded, - - /// The buffer holding slots. - buffer: *mut Slot, - - /// The channel capacity. - cap: usize, - - /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`. - one_lap: usize, - - /// If this bit is set in the tail, that means the channel is disconnected. - mark_bit: usize, - - /// Senders waiting while the channel is full. - senders: SyncWaker, - - /// Receivers waiting while the channel is empty and not disconnected. - receivers: SyncWaker, - - /// Indicates that dropping a `Channel` may drop values of type `T`. - _marker: PhantomData, -} - -impl Channel { - /// Creates a bounded channel of capacity `cap`. - pub fn with_capacity(cap: usize) -> Self { - assert!(cap > 0, "capacity must be positive"); - - // Compute constants `mark_bit` and `one_lap`. - let mark_bit = (cap + 1).next_power_of_two(); - let one_lap = mark_bit * 2; - - // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`. - let head = 0; - // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`. - let tail = 0; - - // Allocate a buffer of `cap` slots. - let buffer = { - let mut v = Vec::>::with_capacity(cap); - let ptr = v.as_mut_ptr(); - mem::forget(v); - ptr - }; - - // Initialize stamps in the slots. - for i in 0..cap { - unsafe { - // Set the stamp to `{ lap: 0, mark: 0, index: i }`. - let slot = buffer.add(i); - ptr::write(&mut (*slot).stamp, AtomicUsize::new(i)); - } - } - - Channel { - buffer, - cap, - one_lap, - mark_bit, - head: CachePadded::new(AtomicUsize::new(head)), - tail: CachePadded::new(AtomicUsize::new(tail)), - senders: SyncWaker::new(), - receivers: SyncWaker::new(), - _marker: PhantomData, - } - } - - /// Returns a receiver handle to the channel. 
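To make the stamp layout in `with_capacity` above concrete: the mark bit is the next power of two past the capacity and one lap is twice that, so a head or tail stamp packs a slot index, a disconnect mark, and a lap counter into one `usize`. A standalone arithmetic sketch mirroring that code:

```
fn main() {
    // Same computation as Channel::with_capacity, for a capacity of 3.
    let cap: usize = 3;
    let mark_bit = (cap + 1).next_power_of_two(); // 4
    let one_lap = mark_bit * 2;                   // 8
    assert_eq!((mark_bit, one_lap), (4, 8));

    // A tail stamp of one_lap + 1 decodes to lap 1, index 1, mark bit clear,
    // using the same masks as start_send/start_recv.
    let tail = one_lap + 1;
    let index = tail & (mark_bit - 1); // lower bits: slot index
    let lap = tail & !(one_lap - 1);   // upper bits: lap counter
    assert_eq!((index, lap), (1, one_lap));
    assert_eq!(tail & mark_bit, 0);    // a set mark bit in the tail means "disconnected"
}
```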
- pub fn receiver(&self) -> Receiver { - Receiver(self) - } - - /// Returns a sender handle to the channel. - pub fn sender(&self) -> Sender { - Sender(self) - } - - /// Attempts to reserve a slot for sending a message. - fn start_send(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut tail = self.tail.load(Ordering::Relaxed); - - loop { - // Check if the channel is disconnected. - if tail & self.mark_bit != 0 { - token.array.slot = ptr::null(); - token.array.stamp = 0; - return true; - } - - // Deconstruct the tail. - let index = tail & (self.mark_bit - 1); - let lap = tail & !(self.one_lap - 1); - - // Inspect the corresponding slot. - let slot = unsafe { &*self.buffer.add(index) }; - let stamp = slot.stamp.load(Ordering::Acquire); - - // If the tail and the stamp match, we may attempt to push. - if tail == stamp { - let new_tail = if index + 1 < self.cap { - // Same lap, incremented index. - // Set to `{ lap: lap, mark: 0, index: index + 1 }`. - tail + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. - lap.wrapping_add(self.one_lap) - }; - - // Try moving the tail. - match self.tail.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Relaxed, - ) { - Ok(_) => { - // Prepare the token for the follow-up call to `write`. - token.array.slot = slot as *const Slot as *const u8; - token.array.stamp = tail + 1; - return true; - } - Err(t) => { - tail = t; - backoff.spin(); - } - } - } else if stamp.wrapping_add(self.one_lap) == tail + 1 { - atomic::fence(Ordering::SeqCst); - let head = self.head.load(Ordering::Relaxed); - - // If the head lags one lap behind the tail as well... - if head.wrapping_add(self.one_lap) == tail { - // ...then the channel is full. - return false; - } - - backoff.spin(); - tail = self.tail.load(Ordering::Relaxed); - } else { - // Snooze because we need to wait for the stamp to get updated. - backoff.snooze(); - tail = self.tail.load(Ordering::Relaxed); - } - } - } - - /// Writes a message into the channel. - pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { - // If there is no slot, the channel is disconnected. - if token.array.slot.is_null() { - return Err(msg); - } - - let slot: &Slot = &*(token.array.slot as *const Slot); - - // Write the message into the slot and update the stamp. - slot.msg.get().write(MaybeUninit::new(msg)); - slot.stamp.store(token.array.stamp, Ordering::Release); - - // Wake a sleeping receiver. - self.receivers.notify(); - Ok(()) - } - - /// Attempts to reserve a slot for receiving a message. - fn start_recv(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut head = self.head.load(Ordering::Relaxed); - - loop { - // Deconstruct the head. - let index = head & (self.mark_bit - 1); - let lap = head & !(self.one_lap - 1); - - // Inspect the corresponding slot. - let slot = unsafe { &*self.buffer.add(index) }; - let stamp = slot.stamp.load(Ordering::Acquire); - - // If the the stamp is ahead of the head by 1, we may attempt to pop. - if head + 1 == stamp { - let new = if index + 1 < self.cap { - // Same lap, incremented index. - // Set to `{ lap: lap, mark: 0, index: index + 1 }`. - head + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. - lap.wrapping_add(self.one_lap) - }; - - // Try moving the head. 
- match self.head.compare_exchange_weak( - head, - new, - Ordering::SeqCst, - Ordering::Relaxed, - ) { - Ok(_) => { - // Prepare the token for the follow-up call to `read`. - token.array.slot = slot as *const Slot as *const u8; - token.array.stamp = head.wrapping_add(self.one_lap); - return true; - } - Err(h) => { - head = h; - backoff.spin(); - } - } - } else if stamp == head { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.load(Ordering::Relaxed); - - // If the tail equals the head, that means the channel is empty. - if (tail & !self.mark_bit) == head { - // If the channel is disconnected... - if tail & self.mark_bit != 0 { - // ...then receive an error. - token.array.slot = ptr::null(); - token.array.stamp = 0; - return true; - } else { - // Otherwise, the receive operation is not ready. - return false; - } - } - - backoff.spin(); - head = self.head.load(Ordering::Relaxed); - } else { - // Snooze because we need to wait for the stamp to get updated. - backoff.snooze(); - head = self.head.load(Ordering::Relaxed); - } - } - } - - /// Reads a message from the channel. - pub unsafe fn read(&self, token: &mut Token) -> Result { - if token.array.slot.is_null() { - // The channel is disconnected. - return Err(()); - } - - let slot: &Slot = &*(token.array.slot as *const Slot); - - // Read the message from the slot and update the stamp. - let msg = slot.msg.get().read().assume_init(); - slot.stamp.store(token.array.stamp, Ordering::Release); - - // Wake a sleeping sender. - self.senders.notify(); - Ok(msg) - } - - /// Attempts to send a message into the channel. - pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { - let token = &mut Token::default(); - if self.start_send(token) { - unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) } - } else { - Err(TrySendError::Full(msg)) - } - } - - /// Sends a message into the channel. - pub fn send(&self, msg: T, deadline: Option) -> Result<(), SendTimeoutError> { - let token = &mut Token::default(); - loop { - // Try sending a message several times. - let backoff = Backoff::new(); - loop { - if self.start_send(token) { - let res = unsafe { self.write(token, msg) }; - return res.map_err(SendTimeoutError::Disconnected); - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - if let Some(d) = deadline { - if Instant::now() >= d { - return Err(SendTimeoutError::Timeout(msg)); - } - } - - Context::with(|cx| { - // Prepare for blocking until a receiver wakes us up. - let oper = Operation::hook(token); - self.senders.register(oper, cx); - - // Has the channel become ready just now? - if !self.is_full() || self.is_disconnected() { - let _ = cx.try_select(Selected::Aborted); - } - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted | Selected::Disconnected => { - self.senders.unregister(oper).unwrap(); - } - Selected::Operation(_) => {} - } - }); - } - } - - /// Attempts to receive a message without blocking. - pub fn try_recv(&self) -> Result { - let token = &mut Token::default(); - - if self.start_recv(token) { - unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) } - } else { - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - pub fn recv(&self, deadline: Option) -> Result { - let token = &mut Token::default(); - loop { - // Try receiving a message several times. 
- let backoff = Backoff::new(); - loop { - if self.start_recv(token) { - let res = unsafe { self.read(token) }; - return res.map_err(|_| RecvTimeoutError::Disconnected); - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - if let Some(d) = deadline { - if Instant::now() >= d { - return Err(RecvTimeoutError::Timeout); - } - } - - Context::with(|cx| { - // Prepare for blocking until a sender wakes us up. - let oper = Operation::hook(token); - self.receivers.register(oper, cx); - - // Has the channel become ready just now? - if !self.is_empty() || self.is_disconnected() { - let _ = cx.try_select(Selected::Aborted); - } - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted | Selected::Disconnected => { - self.receivers.unregister(oper).unwrap(); - // If the channel was disconnected, we still have to check for remaining - // messages. - } - Selected::Operation(_) => {} - } - }); - } - } - - /// Returns the current number of messages inside the channel. - pub fn len(&self) -> usize { - loop { - // Load the tail, then load the head. - let tail = self.tail.load(Ordering::SeqCst); - let head = self.head.load(Ordering::SeqCst); - - // If the tail didn't change, we've got consistent values to work with. - if self.tail.load(Ordering::SeqCst) == tail { - let hix = head & (self.mark_bit - 1); - let tix = tail & (self.mark_bit - 1); - - return if hix < tix { - tix - hix - } else if hix > tix { - self.cap - hix + tix - } else if (tail & !self.mark_bit) == head { - 0 - } else { - self.cap - }; - } - } - } - - /// Returns the capacity of the channel. - pub fn capacity(&self) -> Option { - Some(self.cap) - } - - /// Disconnects the channel and wakes up all blocked senders and receivers. - /// - /// Returns `true` if this call disconnected the channel. - pub fn disconnect(&self) -> bool { - let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst); - - if tail & self.mark_bit == 0 { - self.senders.disconnect(); - self.receivers.disconnect(); - true - } else { - false - } - } - - /// Returns `true` if the channel is disconnected. - pub fn is_disconnected(&self) -> bool { - self.tail.load(Ordering::SeqCst) & self.mark_bit != 0 - } - - /// Returns `true` if the channel is empty. - pub fn is_empty(&self) -> bool { - let head = self.head.load(Ordering::SeqCst); - let tail = self.tail.load(Ordering::SeqCst); - - // Is the tail equal to the head? - // - // Note: If the head changes just before we load the tail, that means there was a moment - // when the channel was not empty, so it is safe to just return `false`. - (tail & !self.mark_bit) == head - } - - /// Returns `true` if the channel is full. - pub fn is_full(&self) -> bool { - let tail = self.tail.load(Ordering::SeqCst); - let head = self.head.load(Ordering::SeqCst); - - // Is the head lagging one lap behind tail? - // - // Note: If the tail changes just before we load the head, that means there was a moment - // when the channel was not full, so it is safe to just return `false`. - head.wrapping_add(self.one_lap) == tail & !self.mark_bit - } -} - -impl Drop for Channel { - fn drop(&mut self) { - // Get the index of the head. - let hix = self.head.load(Ordering::Relaxed) & (self.mark_bit - 1); - - // Loop over all slots that hold a message and drop them. - for i in 0..self.len() { - // Compute the index of the next slot holding a message. 
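This array flavor is what backs `crossbeam_channel::bounded`; both endpoints are cloneable, so several producers and consumers can share one fixed-capacity queue. A usage sketch, not part of the patch:

```
use crossbeam_channel::bounded;
use std::thread;

fn main() {
    // A queue with room for two in-flight messages.
    let (s, r) = bounded::<u32>(2);
    assert_eq!(s.capacity(), Some(2));

    // Two producers: send blocks whenever both slots are occupied.
    let mut workers = Vec::new();
    for id in 0..2u32 {
        let s = s.clone();
        workers.push(thread::spawn(move || {
            for i in 0..10 {
                s.send(id * 100 + i).unwrap();
            }
        }));
    }
    drop(s); // drop the original handle so the channel disconnects when producers finish

    // Two consumers drain the queue until it is empty and disconnected.
    let mut readers = Vec::new();
    for _ in 0..2 {
        let r = r.clone();
        readers.push(thread::spawn(move || r.iter().count()));
    }

    for w in workers {
        w.join().unwrap();
    }
    let received: usize = readers.into_iter().map(|h| h.join().unwrap()).sum();
    assert_eq!(received, 20);
}
```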
- let index = if hix + i < self.cap { - hix + i - } else { - hix + i - self.cap - }; - - unsafe { - let p = { - let slot = &mut *self.buffer.add(index); - let msg = &mut *slot.msg.get(); - msg.as_mut_ptr() - }; - p.drop_in_place(); - } - } - - // Finally, deallocate the buffer, but don't run any destructors. - unsafe { - Vec::from_raw_parts(self.buffer, 0, self.cap); - } - } -} - -/// Receiver handle to a channel. -pub struct Receiver<'a, T: 'a>(&'a Channel); - -/// Sender handle to a channel. -pub struct Sender<'a, T: 'a>(&'a Channel); - -impl<'a, T> SelectHandle for Receiver<'a, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_recv(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.register(oper, cx); - self.is_ready() - } - - fn unregister(&self, oper: Operation) { - self.0.receivers.unregister(oper); - } - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - !self.0.is_empty() || self.0.is_disconnected() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.watch(oper, cx); - self.is_ready() - } - - fn unwatch(&self, oper: Operation) { - self.0.receivers.unwatch(oper); - } -} - -impl<'a, T> SelectHandle for Sender<'a, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_send(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - self.0.senders.register(oper, cx); - self.is_ready() - } - - fn unregister(&self, oper: Operation) { - self.0.senders.unregister(oper); - } - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - !self.0.is_full() || self.0.is_disconnected() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - self.0.senders.watch(oper, cx); - self.is_ready() - } - - fn unwatch(&self, oper: Operation) { - self.0.senders.unwatch(oper); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/list.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/list.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/list.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/list.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,671 +0,0 @@ -//! Unbounded channel implemented as a linked list. - -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::ptr; -use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; -use std::time::Instant; - -use crossbeam_utils::{Backoff, CachePadded}; - -use maybe_uninit::MaybeUninit; - -use context::Context; -use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; -use select::{Operation, SelectHandle, Selected, Token}; -use waker::SyncWaker; - -// TODO(stjepang): Once we bump the minimum required Rust version to 1.28 or newer, re-apply the -// following changes by @kleimkuhler: -// -// 1. https://github.com/crossbeam-rs/crossbeam-channel/pull/100 -// 2. https://github.com/crossbeam-rs/crossbeam-channel/pull/101 - -// Bits indicating the state of a slot: -// * If a message has been written into the slot, `WRITE` is set. -// * If a message has been read from the slot, `READ` is set. -// * If the block is being destroyed, `DESTROY` is set. -const WRITE: usize = 1; -const READ: usize = 2; -const DESTROY: usize = 4; - -// Each block covers one "lap" of indices. 
-const LAP: usize = 32; -// The maximum number of messages a block can hold. -const BLOCK_CAP: usize = LAP - 1; -// How many lower bits are reserved for metadata. -const SHIFT: usize = 1; -// Has two different purposes: -// * If set in head, indicates that the block is not the last one. -// * If set in tail, indicates that the channel is disconnected. -const MARK_BIT: usize = 1; - -/// A slot in a block. -struct Slot { - /// The message. - msg: UnsafeCell>, - - /// The state of the slot. - state: AtomicUsize, -} - -impl Slot { - /// Waits until a message is written into the slot. - fn wait_write(&self) { - let backoff = Backoff::new(); - while self.state.load(Ordering::Acquire) & WRITE == 0 { - backoff.snooze(); - } - } -} - -/// A block in a linked list. -/// -/// Each block in the list can hold up to `BLOCK_CAP` messages. -struct Block { - /// The next block in the linked list. - next: AtomicPtr>, - - /// Slots for messages. - slots: [Slot; BLOCK_CAP], -} - -impl Block { - /// Creates an empty block. - fn new() -> Block { - // SAFETY: This is safe because: - // [1] `Block::next` (AtomicPtr) may be safely zero initialized. - // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. - // [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it - // holds a MaybeUninit. - // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. - unsafe { MaybeUninit::zeroed().assume_init() } - } - - /// Waits until the next pointer is set. - fn wait_next(&self) -> *mut Block { - let backoff = Backoff::new(); - loop { - let next = self.next.load(Ordering::Acquire); - if !next.is_null() { - return next; - } - backoff.snooze(); - } - } - - /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. - unsafe fn destroy(this: *mut Block, start: usize) { - // It is not necessary to set the `DESTROY bit in the last slot because that slot has begun - // destruction of the block. - for i in start..BLOCK_CAP - 1 { - let slot = (*this).slots.get_unchecked(i); - - // Mark the `DESTROY` bit if a thread is still using the slot. - if slot.state.load(Ordering::Acquire) & READ == 0 - && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 - { - // If a thread is still using the slot, it will continue destruction of the block. - return; - } - } - - // No thread is using the block, now it is safe to destroy it. - drop(Box::from_raw(this)); - } -} - -/// A position in a channel. -#[derive(Debug)] -struct Position { - /// The index in the channel. - index: AtomicUsize, - - /// The block in the linked list. - block: AtomicPtr>, -} - -/// The token type for the list flavor. -#[derive(Debug)] -pub struct ListToken { - /// The block of slots. - block: *const u8, - - /// The offset into the block. - offset: usize, -} - -impl Default for ListToken { - #[inline] - fn default() -> Self { - ListToken { - block: ptr::null(), - offset: 0, - } - } -} - -/// Unbounded channel implemented as a linked list. -/// -/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are -/// represented as numbers of type `usize` and wrap on overflow. -/// -/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and -/// improve cache efficiency. -pub struct Channel { - /// The head of the channel. - head: CachePadded>, - - /// The tail of the channel. - tail: CachePadded>, - - /// Receivers waiting while the channel is empty and not disconnected. 
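A concrete reading of the constants above: indices advance in steps of `1 << SHIFT`, each block provides `BLOCK_CAP = LAP - 1 = 31` usable slots, and the low bit is the mark flag. The sketch below decodes a raw index the same way `start_send`/`start_recv` do.

```
fn main() {
    // Constants from the list flavor above.
    const LAP: usize = 32;
    const BLOCK_CAP: usize = LAP - 1; // 31 usable slots per block
    const SHIFT: usize = 1;
    const MARK_BIT: usize = 1;

    // A head/tail index advances in steps of 1 << SHIFT; the low bit is the mark flag.
    let index: usize = 39 << SHIFT;

    let offset = (index >> SHIFT) % LAP; // position inside the current block
    let laps = (index >> SHIFT) / LAP;   // how many whole blocks have been passed
    assert_eq!((laps, offset), (1, 7));

    // offset == BLOCK_CAP is the sentinel meaning "end of block, wait for the next one".
    assert!(offset < BLOCK_CAP);
    assert_eq!(index & MARK_BIT, 0); // in the tail, a set mark bit means "disconnected"
}
```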
- receivers: SyncWaker, - - /// Indicates that dropping a `Channel` may drop messages of type `T`. - _marker: PhantomData, -} - -impl Channel { - /// Creates a new unbounded channel. - pub fn new() -> Self { - Channel { - head: CachePadded::new(Position { - block: AtomicPtr::new(ptr::null_mut()), - index: AtomicUsize::new(0), - }), - tail: CachePadded::new(Position { - block: AtomicPtr::new(ptr::null_mut()), - index: AtomicUsize::new(0), - }), - receivers: SyncWaker::new(), - _marker: PhantomData, - } - } - - /// Returns a receiver handle to the channel. - pub fn receiver(&self) -> Receiver { - Receiver(self) - } - - /// Returns a sender handle to the channel. - pub fn sender(&self) -> Sender { - Sender(self) - } - - /// Attempts to reserve a slot for sending a message. - fn start_send(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut tail = self.tail.index.load(Ordering::Acquire); - let mut block = self.tail.block.load(Ordering::Acquire); - let mut next_block = None; - - loop { - // Check if the channel is disconnected. - if tail & MARK_BIT != 0 { - token.list.block = ptr::null(); - return true; - } - - // Calculate the offset of the index into the block. - let offset = (tail >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - - // If we're going to have to install the next block, allocate it in advance in order to - // make the wait for other threads as short as possible. - if offset + 1 == BLOCK_CAP && next_block.is_none() { - next_block = Some(Box::new(Block::::new())); - } - - // If this is the first message to be sent into the channel, we need to allocate the - // first block and install it. - if block.is_null() { - let new = Box::into_raw(Box::new(Block::::new())); - - if self - .tail - .block - .compare_and_swap(block, new, Ordering::Release) - == block - { - self.head.block.store(new, Ordering::Release); - block = new; - } else { - next_block = unsafe { Some(Box::from_raw(new)) }; - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - } - - let new_tail = tail + (1 << SHIFT); - - // Try advancing the tail forward. - match self.tail.index.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, install the next one. - if offset + 1 == BLOCK_CAP { - let next_block = Box::into_raw(next_block.unwrap()); - self.tail.block.store(next_block, Ordering::Release); - self.tail.index.fetch_add(1 << SHIFT, Ordering::Release); - (*block).next.store(next_block, Ordering::Release); - } - - token.list.block = block as *const u8; - token.list.offset = offset; - return true; - }, - Err(t) => { - tail = t; - block = self.tail.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Writes a message into the channel. - pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { - // If there is no slot, the channel is disconnected. - if token.list.block.is_null() { - return Err(msg); - } - - // Write the message into the slot. 
- let block = token.list.block as *mut Block; - let offset = token.list.offset; - let slot = (*block).slots.get_unchecked(offset); - slot.msg.get().write(MaybeUninit::new(msg)); - slot.state.fetch_or(WRITE, Ordering::Release); - - // Wake a sleeping receiver. - self.receivers.notify(); - Ok(()) - } - - /// Attempts to reserve a slot for receiving a message. - fn start_recv(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut head = self.head.index.load(Ordering::Acquire); - let mut block = self.head.block.load(Ordering::Acquire); - - loop { - // Calculate the offset of the index into the block. - let offset = (head >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - continue; - } - - let mut new_head = head + (1 << SHIFT); - - if new_head & MARK_BIT == 0 { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::Relaxed); - - // If the tail equals the head, that means the channel is empty. - if head >> SHIFT == tail >> SHIFT { - // If the channel is disconnected... - if tail & MARK_BIT != 0 { - // ...then receive an error. - token.list.block = ptr::null(); - return true; - } else { - // Otherwise, the receive operation is not ready. - return false; - } - } - - // If head and tail are not in the same block, set `MARK_BIT` in head. - if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { - new_head |= MARK_BIT; - } - } - - // The block can be null here only if the first message is being sent into the channel. - // In that case, just wait until it gets initialized. - if block.is_null() { - backoff.snooze(); - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - continue; - } - - // Try moving the head index forward. - match self.head.index.compare_exchange_weak( - head, - new_head, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, move to the next one. - if offset + 1 == BLOCK_CAP { - let next = (*block).wait_next(); - let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT); - if !(*next).next.load(Ordering::Relaxed).is_null() { - next_index |= MARK_BIT; - } - - self.head.block.store(next, Ordering::Release); - self.head.index.store(next_index, Ordering::Release); - } - - token.list.block = block as *const u8; - token.list.offset = offset; - return true; - }, - Err(h) => { - head = h; - block = self.head.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Reads a message from the channel. - pub unsafe fn read(&self, token: &mut Token) -> Result { - if token.list.block.is_null() { - // The channel is disconnected. - return Err(()); - } - - // Read the message. - let block = token.list.block as *mut Block; - let offset = token.list.offset; - let slot = (*block).slots.get_unchecked(offset); - slot.wait_write(); - let msg = slot.msg.get().read().assume_init(); - - // Destroy the block if we've reached the end, or if another thread wanted to destroy but - // couldn't because we were busy reading from the slot. - if offset + 1 == BLOCK_CAP { - Block::destroy(block, 0); - } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { - Block::destroy(block, offset + 1); - } - - Ok(msg) - } - - /// Attempts to send a message into the channel. 
- pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { - self.send(msg, None).map_err(|err| match err { - SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg), - SendTimeoutError::Timeout(_) => unreachable!(), - }) - } - - /// Sends a message into the channel. - pub fn send(&self, msg: T, _deadline: Option) -> Result<(), SendTimeoutError> { - let token = &mut Token::default(); - assert!(self.start_send(token)); - unsafe { - self.write(token, msg) - .map_err(SendTimeoutError::Disconnected) - } - } - - /// Attempts to receive a message without blocking. - pub fn try_recv(&self) -> Result { - let token = &mut Token::default(); - - if self.start_recv(token) { - unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) } - } else { - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - pub fn recv(&self, deadline: Option) -> Result { - let token = &mut Token::default(); - loop { - // Try receiving a message several times. - let backoff = Backoff::new(); - loop { - if self.start_recv(token) { - unsafe { - return self.read(token).map_err(|_| RecvTimeoutError::Disconnected); - } - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - if let Some(d) = deadline { - if Instant::now() >= d { - return Err(RecvTimeoutError::Timeout); - } - } - - // Prepare for blocking until a sender wakes us up. - Context::with(|cx| { - let oper = Operation::hook(token); - self.receivers.register(oper, cx); - - // Has the channel become ready just now? - if !self.is_empty() || self.is_disconnected() { - let _ = cx.try_select(Selected::Aborted); - } - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted | Selected::Disconnected => { - self.receivers.unregister(oper).unwrap(); - // If the channel was disconnected, we still have to check for remaining - // messages. - } - Selected::Operation(_) => {} - } - }); - } - } - - /// Returns the current number of messages inside the channel. - pub fn len(&self) -> usize { - loop { - // Load the tail index, then load the head index. - let mut tail = self.tail.index.load(Ordering::SeqCst); - let mut head = self.head.index.load(Ordering::SeqCst); - - // If the tail index didn't change, we've got consistent indices to work with. - if self.tail.index.load(Ordering::SeqCst) == tail { - // Erase the lower bits. - tail &= !((1 << SHIFT) - 1); - head &= !((1 << SHIFT) - 1); - - // Rotate indices so that head falls into the first block. - let lap = (head >> SHIFT) / LAP; - tail = tail.wrapping_sub((lap * LAP) << SHIFT); - head = head.wrapping_sub((lap * LAP) << SHIFT); - - // Remove the lower bits. - tail >>= SHIFT; - head >>= SHIFT; - - // Fix up indices if they fall onto block ends. - if head == BLOCK_CAP { - head = 0; - tail -= LAP; - } - if tail == BLOCK_CAP { - tail += 1; - } - - // Return the difference minus the number of blocks between tail and head. - return tail - head - tail / LAP; - } - } - } - - /// Returns the capacity of the channel. - pub fn capacity(&self) -> Option { - None - } - - /// Disconnects the channel and wakes up all blocked receivers. - /// - /// Returns `true` if this call disconnected the channel. - pub fn disconnect(&self) -> bool { - let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst); - - if tail & MARK_BIT == 0 { - self.receivers.disconnect(); - true - } else { - false - } - } - - /// Returns `true` if the channel is disconnected. 
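The list flavor is what `crossbeam_channel::unbounded` returns: sends never block and never report `Full`, and the capacity is `None`. A short usage sketch:

```
use crossbeam_channel::unbounded;

fn main() {
    let (s, r) = unbounded();

    // No capacity limit: try_send always succeeds while the receiver is alive.
    assert_eq!(s.capacity(), None);
    for i in 0..1_000 {
        s.try_send(i).unwrap();
    }
    assert_eq!(r.len(), 1_000);

    // Dropping the sender disconnects the channel; the blocking iterator then
    // drains the buffered messages and stops.
    drop(s);
    assert_eq!(r.iter().sum::<i32>(), (0..1_000).sum());
}
```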
- pub fn is_disconnected(&self) -> bool { - self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0 - } - - /// Returns `true` if the channel is empty. - pub fn is_empty(&self) -> bool { - let head = self.head.index.load(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::SeqCst); - head >> SHIFT == tail >> SHIFT - } - - /// Returns `true` if the channel is full. - pub fn is_full(&self) -> bool { - false - } -} - -impl Drop for Channel { - fn drop(&mut self) { - let mut head = self.head.index.load(Ordering::Relaxed); - let mut tail = self.tail.index.load(Ordering::Relaxed); - let mut block = self.head.block.load(Ordering::Relaxed); - - // Erase the lower bits. - head &= !((1 << SHIFT) - 1); - tail &= !((1 << SHIFT) - 1); - - unsafe { - // Drop all messages between head and tail and deallocate the heap-allocated blocks. - while head != tail { - let offset = (head >> SHIFT) % LAP; - - if offset < BLOCK_CAP { - // Drop the message in the slot. - let slot = (*block).slots.get_unchecked(offset); - let p = &mut *slot.msg.get(); - p.as_mut_ptr().drop_in_place(); - } else { - // Deallocate the block and move to the next one. - let next = (*block).next.load(Ordering::Relaxed); - drop(Box::from_raw(block)); - block = next; - } - - head = head.wrapping_add(1 << SHIFT); - } - - // Deallocate the last remaining block. - if !block.is_null() { - drop(Box::from_raw(block)); - } - } - } -} - -/// Receiver handle to a channel. -pub struct Receiver<'a, T: 'a>(&'a Channel); - -/// Sender handle to a channel. -pub struct Sender<'a, T: 'a>(&'a Channel); - -impl<'a, T> SelectHandle for Receiver<'a, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_recv(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.register(oper, cx); - self.is_ready() - } - - fn unregister(&self, oper: Operation) { - self.0.receivers.unregister(oper); - } - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - !self.0.is_empty() || self.0.is_disconnected() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.watch(oper, cx); - self.is_ready() - } - - fn unwatch(&self, oper: Operation) { - self.0.receivers.unwatch(oper); - } -} - -impl<'a, T> SelectHandle for Sender<'a, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_send(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - fn unregister(&self, _oper: Operation) {} - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - true - } - - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - fn unwatch(&self, _oper: Operation) {} -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/mod.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/mod.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -//! Channel flavors. -//! -//! There are six flavors: -//! -//! 1. `after` - Channel that delivers a message after a certain amount of time. -//! 2. `array` - Bounded channel based on a preallocated array. -//! 3. `list` - Unbounded channel implemented as a linked list. -//! 4. 
`never` - Channel that never delivers messages. -//! 5. `tick` - Channel that delivers messages periodically. -//! 6. `zero` - Zero-capacity channel. - -pub mod after; -pub mod array; -pub mod list; -pub mod never; -pub mod tick; -pub mod zero; diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/never.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/never.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/never.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/never.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -//! Channel that never delivers messages. -//! -//! Messages cannot be sent into this kind of channel. - -use std::marker::PhantomData; -use std::time::Instant; - -use context::Context; -use err::{RecvTimeoutError, TryRecvError}; -use select::{Operation, SelectHandle, Token}; -use utils; - -/// This flavor doesn't need a token. -pub type NeverToken = (); - -/// Channel that never delivers messages. -pub struct Channel { - _marker: PhantomData, -} - -impl Channel { - /// Creates a channel that never delivers messages. - #[inline] - pub fn new() -> Self { - Channel { - _marker: PhantomData, - } - } - - /// Attempts to receive a message without blocking. - #[inline] - pub fn try_recv(&self) -> Result { - Err(TryRecvError::Empty) - } - - /// Receives a message from the channel. - #[inline] - pub fn recv(&self, deadline: Option) -> Result { - utils::sleep_until(deadline); - Err(RecvTimeoutError::Timeout) - } - - /// Reads a message from the channel. - #[inline] - pub unsafe fn read(&self, _token: &mut Token) -> Result { - Err(()) - } - - /// Returns `true` if the channel is empty. - #[inline] - pub fn is_empty(&self) -> bool { - true - } - - /// Returns `true` if the channel is full. - #[inline] - pub fn is_full(&self) -> bool { - true - } - - /// Returns the number of messages in the channel. - #[inline] - pub fn len(&self) -> usize { - 0 - } - - /// Returns the capacity of the channel. - #[inline] - pub fn capacity(&self) -> Option { - Some(0) - } -} - -impl SelectHandle for Channel { - #[inline] - fn try_select(&self, _token: &mut Token) -> bool { - false - } - - #[inline] - fn deadline(&self) -> Option { - None - } - - #[inline] - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unregister(&self, _oper: Operation) {} - - #[inline] - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - #[inline] - fn is_ready(&self) -> bool { - false - } - - #[inline] - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unwatch(&self, _oper: Operation) {} -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/tick.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/tick.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/tick.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/tick.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,167 +0,0 @@ -//! Channel that delivers messages periodically. -//! -//! Messages cannot be sent into this kind of channel; they are materialized on demand. - -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_utils::atomic::AtomicCell; - -use context::Context; -use err::{RecvTimeoutError, TryRecvError}; -use select::{Operation, SelectHandle, Token}; - -/// Result of a receive operation. -pub type TickToken = Option; - -/// Channel that delivers messages periodically. 
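The `tick` and `never` flavors removed above sit behind the top-level `tick(Duration)` and `never()` constructors (assumed, matching the flavor names); `never()` is useful as a receiver that keeps a `select!` arm permanently disabled. A sketch combining them with `after`:

```
use std::time::Duration;
use crossbeam_channel::{after, never, select, tick, Receiver};

fn main() {
    // The ticker fires roughly every 20 ms; the deadline fires once after 100 ms.
    let ticker = tick(Duration::from_millis(20));
    let deadline = after(Duration::from_millis(100));

    // A never() receiver is never ready, so this arm stays disabled unless a
    // real data source is plugged in.
    let extra: Receiver<i32> = never();

    let mut ticks = 0;
    loop {
        select! {
            recv(ticker) -> _ => ticks += 1,
            recv(extra) -> _ => unreachable!("never() does not deliver messages"),
            recv(deadline) -> _ => break,
        }
    }
    // Roughly 100 ms / 20 ms ticks arrive before the deadline; the exact count
    // depends on scheduling.
    assert!(ticks >= 1);
}
```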
-pub struct Channel { - /// The instant at which the next message will be delivered. - delivery_time: AtomicCell, - - /// The time interval in which messages get delivered. - duration: Duration, -} - -impl Channel { - /// Creates a channel that delivers messages periodically. - #[inline] - pub fn new(dur: Duration) -> Self { - Channel { - delivery_time: AtomicCell::new(Instant::now() + dur), - duration: dur, - } - } - - /// Attempts to receive a message without blocking. - #[inline] - pub fn try_recv(&self) -> Result { - loop { - let now = Instant::now(); - let delivery_time = self.delivery_time.load(); - - if now < delivery_time { - return Err(TryRecvError::Empty); - } - - if self - .delivery_time - .compare_exchange(delivery_time, now + self.duration) - .is_ok() - { - return Ok(delivery_time); - } - } - } - - /// Receives a message from the channel. - #[inline] - pub fn recv(&self, deadline: Option) -> Result { - loop { - let delivery_time = self.delivery_time.load(); - let now = Instant::now(); - - if let Some(d) = deadline { - if d < delivery_time { - if now < d { - thread::sleep(d - now); - } - return Err(RecvTimeoutError::Timeout); - } - } - - if self - .delivery_time - .compare_exchange(delivery_time, delivery_time.max(now) + self.duration) - .is_ok() - { - if now < delivery_time { - thread::sleep(delivery_time - now); - } - return Ok(delivery_time); - } - } - } - - /// Reads a message from the channel. - #[inline] - pub unsafe fn read(&self, token: &mut Token) -> Result { - token.tick.ok_or(()) - } - - /// Returns `true` if the channel is empty. - #[inline] - pub fn is_empty(&self) -> bool { - Instant::now() < self.delivery_time.load() - } - - /// Returns `true` if the channel is full. - #[inline] - pub fn is_full(&self) -> bool { - !self.is_empty() - } - - /// Returns the number of messages in the channel. - #[inline] - pub fn len(&self) -> usize { - if self.is_empty() { - 0 - } else { - 1 - } - } - - /// Returns the capacity of the channel. - #[inline] - pub fn capacity(&self) -> Option { - Some(1) - } -} - -impl SelectHandle for Channel { - #[inline] - fn try_select(&self, token: &mut Token) -> bool { - match self.try_recv() { - Ok(msg) => { - token.tick = Some(msg); - true - } - Err(TryRecvError::Disconnected) => { - token.tick = None; - true - } - Err(TryRecvError::Empty) => false, - } - } - - #[inline] - fn deadline(&self) -> Option { - Some(self.delivery_time.load()) - } - - #[inline] - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unregister(&self, _oper: Operation) {} - - #[inline] - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - #[inline] - fn is_ready(&self) -> bool { - !self.is_empty() - } - - #[inline] - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unwatch(&self, _oper: Operation) {} -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/flavors/zero.rs cargo-0.47.0/vendor/crossbeam-channel/src/flavors/zero.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/flavors/zero.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/flavors/zero.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,466 +0,0 @@ -//! Zero-capacity channel. -//! -//! This kind of channel is also known as *rendezvous* channel. 
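A zero-capacity (rendezvous) channel is created with `bounded(0)`, as in the error examples earlier in this file set; it buffers nothing, so every send has to pair up with a receive. A minimal sketch:

```
use crossbeam_channel::{bounded, TrySendError};
use std::thread;

fn main() {
    let (s, r) = bounded::<&str>(0);

    // With nobody currently receiving, a non-blocking send cannot hand the
    // message over, so it reports Full even though nothing is buffered.
    assert!(matches!(s.try_send("ping"), Err(TrySendError::Full("ping"))));
    assert_eq!(r.len(), 0);

    // A blocking send rendezvouses with a blocking receive in another thread.
    let sender = thread::spawn(move || s.send("pong").unwrap());
    assert_eq!(r.recv(), Ok("pong"));
    sender.join().unwrap();
}
```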
- -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::time::Instant; - -use crossbeam_utils::Backoff; - -use context::Context; -use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; -use select::{Operation, SelectHandle, Selected, Token}; -use utils::Spinlock; -use waker::Waker; - -/// A pointer to a packet. -pub type ZeroToken = usize; - -/// A slot for passing one message from a sender to a receiver. -struct Packet { - /// Equals `true` if the packet is allocated on the stack. - on_stack: bool, - - /// Equals `true` once the packet is ready for reading or writing. - ready: AtomicBool, - - /// The message. - msg: UnsafeCell>, -} - -impl Packet { - /// Creates an empty packet on the stack. - fn empty_on_stack() -> Packet { - Packet { - on_stack: true, - ready: AtomicBool::new(false), - msg: UnsafeCell::new(None), - } - } - - /// Creates an empty packet on the heap. - fn empty_on_heap() -> Box> { - Box::new(Packet { - on_stack: false, - ready: AtomicBool::new(false), - msg: UnsafeCell::new(None), - }) - } - - /// Creates a packet on the stack, containing a message. - fn message_on_stack(msg: T) -> Packet { - Packet { - on_stack: true, - ready: AtomicBool::new(false), - msg: UnsafeCell::new(Some(msg)), - } - } - - /// Waits until the packet becomes ready for reading or writing. - fn wait_ready(&self) { - let backoff = Backoff::new(); - while !self.ready.load(Ordering::Acquire) { - backoff.snooze(); - } - } -} - -/// Inner representation of a zero-capacity channel. -struct Inner { - /// Senders waiting to pair up with a receive operation. - senders: Waker, - - /// Receivers waiting to pair up with a send operation. - receivers: Waker, - - /// Equals `true` when the channel is disconnected. - is_disconnected: bool, -} - -/// Zero-capacity channel. -pub struct Channel { - /// Inner representation of the channel. - inner: Spinlock, - - /// Indicates that dropping a `Channel` may drop values of type `T`. - _marker: PhantomData, -} - -impl Channel { - /// Constructs a new zero-capacity channel. - pub fn new() -> Self { - Channel { - inner: Spinlock::new(Inner { - senders: Waker::new(), - receivers: Waker::new(), - is_disconnected: false, - }), - _marker: PhantomData, - } - } - - /// Returns a receiver handle to the channel. - pub fn receiver(&self) -> Receiver { - Receiver(self) - } - - /// Returns a sender handle to the channel. - pub fn sender(&self) -> Sender { - Sender(self) - } - - /// Attempts to reserve a slot for sending a message. - fn start_send(&self, token: &mut Token) -> bool { - let mut inner = self.inner.lock(); - - // If there's a waiting receiver, pair up with it. - if let Some(operation) = inner.receivers.try_select() { - token.zero = operation.packet; - true - } else if inner.is_disconnected { - token.zero = 0; - true - } else { - false - } - } - - /// Writes a message into the packet. - pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { - // If there is no packet, the channel is disconnected. - if token.zero == 0 { - return Err(msg); - } - - let packet = &*(token.zero as *const Packet); - packet.msg.get().write(Some(msg)); - packet.ready.store(true, Ordering::Release); - Ok(()) - } - - /// Attempts to pair up with a sender. - fn start_recv(&self, token: &mut Token) -> bool { - let mut inner = self.inner.lock(); - - // If there's a waiting sender, pair up with it. 
- if let Some(operation) = inner.senders.try_select() { - token.zero = operation.packet; - true - } else if inner.is_disconnected { - token.zero = 0; - true - } else { - false - } - } - - /// Reads a message from the packet. - pub unsafe fn read(&self, token: &mut Token) -> Result { - // If there is no packet, the channel is disconnected. - if token.zero == 0 { - return Err(()); - } - - let packet = &*(token.zero as *const Packet); - - if packet.on_stack { - // The message has been in the packet from the beginning, so there is no need to wait - // for it. However, after reading the message, we need to set `ready` to `true` in - // order to signal that the packet can be destroyed. - let msg = packet.msg.get().replace(None).unwrap(); - packet.ready.store(true, Ordering::Release); - Ok(msg) - } else { - // Wait until the message becomes available, then read it and destroy the - // heap-allocated packet. - packet.wait_ready(); - let msg = packet.msg.get().replace(None).unwrap(); - drop(Box::from_raw(packet as *const Packet as *mut Packet)); - Ok(msg) - } - } - - /// Attempts to send a message into the channel. - pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { - let token = &mut Token::default(); - let mut inner = self.inner.lock(); - - // If there's a waiting receiver, pair up with it. - if let Some(operation) = inner.receivers.try_select() { - token.zero = operation.packet; - drop(inner); - unsafe { - self.write(token, msg).ok().unwrap(); - } - Ok(()) - } else if inner.is_disconnected { - Err(TrySendError::Disconnected(msg)) - } else { - Err(TrySendError::Full(msg)) - } - } - - /// Sends a message into the channel. - pub fn send(&self, msg: T, deadline: Option) -> Result<(), SendTimeoutError> { - let token = &mut Token::default(); - let mut inner = self.inner.lock(); - - // If there's a waiting receiver, pair up with it. - if let Some(operation) = inner.receivers.try_select() { - token.zero = operation.packet; - drop(inner); - unsafe { - self.write(token, msg).ok().unwrap(); - } - return Ok(()); - } - - if inner.is_disconnected { - return Err(SendTimeoutError::Disconnected(msg)); - } - - Context::with(|cx| { - // Prepare for blocking until a receiver wakes us up. - let oper = Operation::hook(token); - let packet = Packet::::message_on_stack(msg); - inner - .senders - .register_with_packet(oper, &packet as *const Packet as usize, cx); - inner.receivers.notify(); - drop(inner); - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => { - self.inner.lock().senders.unregister(oper).unwrap(); - let msg = unsafe { packet.msg.get().replace(None).unwrap() }; - Err(SendTimeoutError::Timeout(msg)) - } - Selected::Disconnected => { - self.inner.lock().senders.unregister(oper).unwrap(); - let msg = unsafe { packet.msg.get().replace(None).unwrap() }; - Err(SendTimeoutError::Disconnected(msg)) - } - Selected::Operation(_) => { - // Wait until the message is read, then drop the packet. - packet.wait_ready(); - Ok(()) - } - } - }) - } - - /// Attempts to receive a message without blocking. - pub fn try_recv(&self) -> Result { - let token = &mut Token::default(); - let mut inner = self.inner.lock(); - - // If there's a waiting sender, pair up with it. 
-        if let Some(operation) = inner.senders.try_select() {
-            token.zero = operation.packet;
-            drop(inner);
-            unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
-        } else if inner.is_disconnected {
-            Err(TryRecvError::Disconnected)
-        } else {
-            Err(TryRecvError::Empty)
-        }
-    }
-
-    /// Receives a message from the channel.
-    pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
-        let token = &mut Token::default();
-        let mut inner = self.inner.lock();
-
-        // If there's a waiting sender, pair up with it.
-        if let Some(operation) = inner.senders.try_select() {
-            token.zero = operation.packet;
-            drop(inner);
-            unsafe {
-                return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
-            }
-        }
-
-        if inner.is_disconnected {
-            return Err(RecvTimeoutError::Disconnected);
-        }
-
-        Context::with(|cx| {
-            // Prepare for blocking until a sender wakes us up.
-            let oper = Operation::hook(token);
-            let packet = Packet::<T>::empty_on_stack();
-            inner
-                .receivers
-                .register_with_packet(oper, &packet as *const Packet<T> as usize, cx);
-            inner.senders.notify();
-            drop(inner);
-
-            // Block the current thread.
-            let sel = cx.wait_until(deadline);
-
-            match sel {
-                Selected::Waiting => unreachable!(),
-                Selected::Aborted => {
-                    self.inner.lock().receivers.unregister(oper).unwrap();
-                    Err(RecvTimeoutError::Timeout)
-                }
-                Selected::Disconnected => {
-                    self.inner.lock().receivers.unregister(oper).unwrap();
-                    Err(RecvTimeoutError::Disconnected)
-                }
-                Selected::Operation(_) => {
-                    // Wait until the message is provided, then read it.
-                    packet.wait_ready();
-                    unsafe { Ok(packet.msg.get().replace(None).unwrap()) }
-                }
-            }
-        })
-    }
-
-    /// Disconnects the channel and wakes up all blocked senders and receivers.
-    ///
-    /// Returns `true` if this call disconnected the channel.
-    pub fn disconnect(&self) -> bool {
-        let mut inner = self.inner.lock();
-
-        if !inner.is_disconnected {
-            inner.is_disconnected = true;
-            inner.senders.disconnect();
-            inner.receivers.disconnect();
-            true
-        } else {
-            false
-        }
-    }
-
-    /// Returns the current number of messages inside the channel.
-    pub fn len(&self) -> usize {
-        0
-    }
-
-    /// Returns the capacity of the channel.
-    pub fn capacity(&self) -> Option<usize> {
-        Some(0)
-    }
-
-    /// Returns `true` if the channel is empty.
-    pub fn is_empty(&self) -> bool {
-        true
-    }
-
-    /// Returns `true` if the channel is full.
-    pub fn is_full(&self) -> bool {
-        true
-    }
-}
-
-/// Receiver handle to a channel.
-pub struct Receiver<'a, T: 'a>(&'a Channel<T>);
-
-/// Sender handle to a channel.
-pub struct Sender<'a, T: 'a>(&'a Channel<T>);
-
-impl<'a, T> SelectHandle for Receiver<'a, T> {
-    fn try_select(&self, token: &mut Token) -> bool {
-        self.0.start_recv(token)
-    }
-
-    fn deadline(&self) -> Option<Instant> {
-        None
-    }
-
-    fn register(&self, oper: Operation, cx: &Context) -> bool {
-        let packet = Box::into_raw(Packet::<T>::empty_on_heap());
-
-        let mut inner = self.0.inner.lock();
-        inner
-            .receivers
-            .register_with_packet(oper, packet as usize, cx);
-        inner.senders.notify();
-        inner.senders.can_select() || inner.is_disconnected
-    }
-
-    fn unregister(&self, oper: Operation) {
-        if let Some(operation) = self.0.inner.lock().receivers.unregister(oper) {
-            unsafe {
-                drop(Box::from_raw(operation.packet as *mut Packet<T>));
-            }
-        }
-    }
-
-    fn accept(&self, token: &mut Token, cx: &Context) -> bool {
-        token.zero = cx.wait_packet();
-        true
-    }
-
-    fn is_ready(&self) -> bool {
-        let inner = self.0.inner.lock();
-        inner.senders.can_select() || inner.is_disconnected
-    }
-
-    fn watch(&self, oper: Operation, cx: &Context) -> bool {
-        let mut inner = self.0.inner.lock();
-        inner.receivers.watch(oper, cx);
-        inner.senders.can_select() || inner.is_disconnected
-    }
-
-    fn unwatch(&self, oper: Operation) {
-        let mut inner = self.0.inner.lock();
-        inner.receivers.unwatch(oper);
-    }
-}
-
-impl<'a, T> SelectHandle for Sender<'a, T> {
-    fn try_select(&self, token: &mut Token) -> bool {
-        self.0.start_send(token)
-    }
-
-    fn deadline(&self) -> Option<Instant> {
-        None
-    }
-
-    fn register(&self, oper: Operation, cx: &Context) -> bool {
-        let packet = Box::into_raw(Packet::<T>::empty_on_heap());
-
-        let mut inner = self.0.inner.lock();
-        inner
-            .senders
-            .register_with_packet(oper, packet as usize, cx);
-        inner.receivers.notify();
-        inner.receivers.can_select() || inner.is_disconnected
-    }
-
-    fn unregister(&self, oper: Operation) {
-        if let Some(operation) = self.0.inner.lock().senders.unregister(oper) {
-            unsafe {
-                drop(Box::from_raw(operation.packet as *mut Packet<T>));
-            }
-        }
-    }
-
-    fn accept(&self, token: &mut Token, cx: &Context) -> bool {
-        token.zero = cx.wait_packet();
-        true
-    }
-
-    fn is_ready(&self) -> bool {
-        let inner = self.0.inner.lock();
-        inner.receivers.can_select() || inner.is_disconnected
-    }
-
-    fn watch(&self, oper: Operation, cx: &Context) -> bool {
-        let mut inner = self.0.inner.lock();
-        inner.senders.watch(oper, cx);
-        inner.receivers.can_select() || inner.is_disconnected
-    }
-
-    fn unwatch(&self, oper: Operation) {
-        let mut inner = self.0.inner.lock();
-        inner.senders.unwatch(oper);
-    }
-}
diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/lib.rs cargo-0.47.0/vendor/crossbeam-channel/src/lib.rs
--- cargo-0.44.1/vendor/crossbeam-channel/src/lib.rs 2020-05-27 21:15:58.000000000 +0000
+++ cargo-0.47.0/vendor/crossbeam-channel/src/lib.rs 1970-01-01 00:00:00.000000000 +0000
@@ -1,379 +0,0 @@
-//! Multi-producer multi-consumer channels for message passing.
-//!
-//! This crate is an alternative to [`std::sync::mpsc`] with more features and better performance.
-//!
-//! # Hello, world!
-//!
-//! ```
-//! use crossbeam_channel::unbounded;
-//!
-//! // Create a channel of unbounded capacity.
-//! let (s, r) = unbounded();
-//!
-//! // Send a message into the channel.
-//! s.send("Hello, world!").unwrap();
-//!
-//! // Receive the message from the channel.
-//! assert_eq!(r.recv(), Ok("Hello, world!"));
-//! ```
-//!
-//! # Channel types
-//!
-//! Channels can be created using two functions:
-//!
-//! * [`bounded`] creates a channel of bounded capacity, i.e. there is a limit to how many messages
-//!
it can hold at a time. -//! -//! * [`unbounded`] creates a channel of unbounded capacity, i.e. it can hold any number of -//! messages at a time. -//! -//! Both functions return a [`Sender`] and a [`Receiver`], which represent the two opposite sides -//! of a channel. -//! -//! Creating a bounded channel: -//! -//! ``` -//! use crossbeam_channel::bounded; -//! -//! // Create a channel that can hold at most 5 messages at a time. -//! let (s, r) = bounded(5); -//! -//! // Can send only 5 messages without blocking. -//! for i in 0..5 { -//! s.send(i).unwrap(); -//! } -//! -//! // Another call to `send` would block because the channel is full. -//! // s.send(5).unwrap(); -//! ``` -//! -//! Creating an unbounded channel: -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! // Create an unbounded channel. -//! let (s, r) = unbounded(); -//! -//! // Can send any number of messages into the channel without blocking. -//! for i in 0..1000 { -//! s.send(i).unwrap(); -//! } -//! ``` -//! -//! A special case is zero-capacity channel, which cannot hold any messages. Instead, send and -//! receive operations must appear at the same time in order to pair up and pass the message over: -//! -//! ``` -//! use std::thread; -//! use crossbeam_channel::bounded; -//! -//! // Create a zero-capacity channel. -//! let (s, r) = bounded(0); -//! -//! // Sending blocks until a receive operation appears on the other side. -//! thread::spawn(move || s.send("Hi!").unwrap()); -//! -//! // Receiving blocks until a send operation appears on the other side. -//! assert_eq!(r.recv(), Ok("Hi!")); -//! ``` -//! -//! # Sharing channels -//! -//! Senders and receivers can be cloned and sent to other threads: -//! -//! ``` -//! use std::thread; -//! use crossbeam_channel::bounded; -//! -//! let (s1, r1) = bounded(0); -//! let (s2, r2) = (s1.clone(), r1.clone()); -//! -//! // Spawn a thread that receives a message and then sends one. -//! thread::spawn(move || { -//! r2.recv().unwrap(); -//! s2.send(2).unwrap(); -//! }); -//! -//! // Send a message and then receive one. -//! s1.send(1).unwrap(); -//! r1.recv().unwrap(); -//! ``` -//! -//! Note that cloning only creates a new handle to the same sending or receiving side. It does not -//! create a separate stream of messages in any way: -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! let (s1, r1) = unbounded(); -//! let (s2, r2) = (s1.clone(), r1.clone()); -//! let (s3, r3) = (s2.clone(), r2.clone()); -//! -//! s1.send(10).unwrap(); -//! s2.send(20).unwrap(); -//! s3.send(30).unwrap(); -//! -//! assert_eq!(r3.recv(), Ok(10)); -//! assert_eq!(r1.recv(), Ok(20)); -//! assert_eq!(r2.recv(), Ok(30)); -//! ``` -//! -//! It's also possible to share senders and receivers by reference: -//! -//! ``` -//! # extern crate crossbeam_channel; -//! # extern crate crossbeam_utils; -//! # fn main() { -//! use std::thread; -//! use crossbeam_channel::bounded; -//! use crossbeam_utils::thread::scope; -//! -//! let (s, r) = bounded(0); -//! -//! scope(|scope| { -//! // Spawn a thread that receives a message and then sends one. -//! scope.spawn(|_| { -//! r.recv().unwrap(); -//! s.send(2).unwrap(); -//! }); -//! -//! // Send a message and then receive one. -//! s.send(1).unwrap(); -//! r.recv().unwrap(); -//! }).unwrap(); -//! # } -//! ``` -//! -//! # Disconnection -//! -//! When all senders or all receivers associated with a channel get dropped, the channel becomes -//! disconnected. No more messages can be sent, but any remaining messages can still be received. -//! 
Send and receive operations on a disconnected channel never block. -//! -//! ``` -//! use crossbeam_channel::{unbounded, RecvError}; -//! -//! let (s, r) = unbounded(); -//! s.send(1).unwrap(); -//! s.send(2).unwrap(); -//! s.send(3).unwrap(); -//! -//! // The only sender is dropped, disconnecting the channel. -//! drop(s); -//! -//! // The remaining messages can be received. -//! assert_eq!(r.recv(), Ok(1)); -//! assert_eq!(r.recv(), Ok(2)); -//! assert_eq!(r.recv(), Ok(3)); -//! -//! // There are no more messages in the channel. -//! assert!(r.is_empty()); -//! -//! // Note that calling `r.recv()` does not block. -//! // Instead, `Err(RecvError)` is returned immediately. -//! assert_eq!(r.recv(), Err(RecvError)); -//! ``` -//! -//! # Blocking operations -//! -//! Send and receive operations come in three flavors: -//! -//! * Non-blocking (returns immediately with success or failure). -//! * Blocking (waits until the operation succeeds or the channel becomes disconnected). -//! * Blocking with a timeout (blocks only for a certain duration of time). -//! -//! A simple example showing the difference between non-blocking and blocking operations: -//! -//! ``` -//! use crossbeam_channel::{bounded, RecvError, TryRecvError}; -//! -//! let (s, r) = bounded(1); -//! -//! // Send a message into the channel. -//! s.send("foo").unwrap(); -//! -//! // This call would block because the channel is full. -//! // s.send("bar").unwrap(); -//! -//! // Receive the message. -//! assert_eq!(r.recv(), Ok("foo")); -//! -//! // This call would block because the channel is empty. -//! // r.recv(); -//! -//! // Try receiving a message without blocking. -//! assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -//! -//! // Disconnect the channel. -//! drop(s); -//! -//! // This call doesn't block because the channel is now disconnected. -//! assert_eq!(r.recv(), Err(RecvError)); -//! ``` -//! -//! # Iteration -//! -//! Receivers can be used as iterators. For example, method [`iter`] creates an iterator that -//! receives messages until the channel becomes empty and disconnected. Note that iteration may -//! block waiting for next message to arrive. -//! -//! ``` -//! use std::thread; -//! use crossbeam_channel::unbounded; -//! -//! let (s, r) = unbounded(); -//! -//! thread::spawn(move || { -//! s.send(1).unwrap(); -//! s.send(2).unwrap(); -//! s.send(3).unwrap(); -//! drop(s); // Disconnect the channel. -//! }); -//! -//! // Collect all messages from the channel. -//! // Note that the call to `collect` blocks until the sender is dropped. -//! let v: Vec<_> = r.iter().collect(); -//! -//! assert_eq!(v, [1, 2, 3]); -//! ``` -//! -//! A non-blocking iterator can be created using [`try_iter`], which receives all available -//! messages without blocking: -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! let (s, r) = unbounded(); -//! s.send(1).unwrap(); -//! s.send(2).unwrap(); -//! s.send(3).unwrap(); -//! // No need to drop the sender. -//! -//! // Receive all messages currently in the channel. -//! let v: Vec<_> = r.try_iter().collect(); -//! -//! assert_eq!(v, [1, 2, 3]); -//! ``` -//! -//! # Selection -//! -//! The [`select!`] macro allows you to define a set of channel operations, wait until any one of -//! them becomes ready, and finally execute it. If multiple operations are ready at the same time, -//! a random one among them is selected. -//! -//! It is also possible to define a `default` case that gets executed if none of the operations are -//! 
ready, either right away or for a certain duration of time. -//! -//! An operation is considered to be ready if it doesn't have to block. Note that it is ready even -//! when it will simply return an error because the channel is disconnected. -//! -//! An example of receiving a message from two channels: -//! -//! ``` -//! # #[macro_use] -//! # extern crate crossbeam_channel; -//! # fn main() { -//! use std::thread; -//! use std::time::Duration; -//! use crossbeam_channel::unbounded; -//! -//! let (s1, r1) = unbounded(); -//! let (s2, r2) = unbounded(); -//! -//! thread::spawn(move || s1.send(10).unwrap()); -//! thread::spawn(move || s2.send(20).unwrap()); -//! -//! // At most one of these two receive operations will be executed. -//! select! { -//! recv(r1) -> msg => assert_eq!(msg, Ok(10)), -//! recv(r2) -> msg => assert_eq!(msg, Ok(20)), -//! default(Duration::from_secs(1)) => println!("timed out"), -//! } -//! # } -//! ``` -//! -//! If you need to select over a dynamically created list of channel operations, use [`Select`] -//! instead. The [`select!`] macro is just a convenience wrapper around [`Select`]. -//! -//! # Extra channels -//! -//! Three functions can create special kinds of channels, all of which return just a [`Receiver`] -//! handle: -//! -//! * [`after`] creates a channel that delivers a single message after a certain duration of time. -//! * [`tick`] creates a channel that delivers messages periodically. -//! * [`never`] creates a channel that never delivers messages. -//! -//! These channels are very efficient because messages get lazily generated on receive operations. -//! -//! An example that prints elapsed time every 50 milliseconds for the duration of 1 second: -//! -//! ``` -//! # #[macro_use] -//! # extern crate crossbeam_channel; -//! # fn main() { -//! use std::time::{Duration, Instant}; -//! use crossbeam_channel::{after, tick}; -//! -//! let start = Instant::now(); -//! let ticker = tick(Duration::from_millis(50)); -//! let timeout = after(Duration::from_secs(1)); -//! -//! loop { -//! select! { -//! recv(ticker) -> _ => println!("elapsed: {:?}", start.elapsed()), -//! recv(timeout) -> _ => break, -//! } -//! } -//! # } -//! ``` -//! -//! [`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html -//! [`unbounded`]: fn.unbounded.html -//! [`bounded`]: fn.bounded.html -//! [`after`]: fn.after.html -//! [`tick`]: fn.tick.html -//! [`never`]: fn.never.html -//! [`send`]: struct.Sender.html#method.send -//! [`recv`]: struct.Receiver.html#method.recv -//! [`iter`]: struct.Receiver.html#method.iter -//! [`try_iter`]: struct.Receiver.html#method.try_iter -//! [`select!`]: macro.select.html -//! [`Select`]: struct.Select.html -//! [`Sender`]: struct.Sender.html -//! [`Receiver`]: struct.Receiver.html - -#![warn(missing_docs)] -#![warn(missing_debug_implementations)] - -extern crate crossbeam_utils; -extern crate maybe_uninit; - -mod channel; -mod context; -mod counter; -mod err; -mod flavors; -mod select; -mod select_macro; -mod utils; -mod waker; - -/// Crate internals used by the `select!` macro. 
-#[doc(hidden)] -pub mod internal { - pub use select::SelectHandle; - pub use select::{select, select_timeout, try_select}; -} - -pub use channel::{after, never, tick}; -pub use channel::{bounded, unbounded}; -pub use channel::{IntoIter, Iter, TryIter}; -pub use channel::{Receiver, Sender}; - -pub use select::{Select, SelectedOperation}; - -pub use err::{ReadyTimeoutError, SelectTimeoutError, TryReadyError, TrySelectError}; -pub use err::{RecvError, RecvTimeoutError, TryRecvError}; -pub use err::{SendError, SendTimeoutError, TrySendError}; diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/select_macro.rs cargo-0.47.0/vendor/crossbeam-channel/src/select_macro.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/select_macro.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/select_macro.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1214 +0,0 @@ -//! The `select!` macro. - -/// A simple wrapper around the standard macros. -/// -/// This is just an ugly workaround until it becomes possible to import macros with `use` -/// statements. -/// -/// TODO(stjepang): Once we bump the minimum required Rust version to 1.30 or newer, we should: -/// -/// 1. Remove all `#[macro_export(local_inner_macros)]` lines. -/// 2. Replace `crossbeam_channel_delegate` with direct macro invocations. -#[doc(hidden)] -#[macro_export] -macro_rules! crossbeam_channel_delegate { - (concat($($args:tt)*)) => { - concat!($($args)*) - }; - (stringify($($args:tt)*)) => { - stringify!($($args)*) - }; - (unreachable($($args:tt)*)) => { - unreachable!($($args)*) - }; - (compile_error($($args:tt)*)) => { - compile_error!($($args)*) - }; -} - -/// A helper macro for `select!` to hide the long list of macro patterns from the documentation. -/// -/// The macro consists of two stages: -/// 1. Parsing -/// 2. Code generation -/// -/// The parsing stage consists of these subparts: -/// 1. `@list`: Turns a list of tokens into a list of cases. -/// 2. `@list_errorN`: Diagnoses the syntax error. -/// 3. `@case`: Parses a single case and verifies its argument list. -/// -/// The codegen stage consists of these subparts: -/// 1. `@init`: Attempts to optimize `select!` away and initializes the list of handles. -/// 1. `@count`: Counts the listed cases. -/// 3. `@add`: Adds send/receive operations to the list of handles and starts selection. -/// 4. `@complete`: Completes the selected send/receive operation. -/// -/// If the parsing stage encounters a syntax error or the codegen stage ends up with too many -/// cases to process, the macro fails with a compile-time error. -#[doc(hidden)] -#[macro_export(local_inner_macros)] -macro_rules! crossbeam_channel_internal { - // The list is empty. Now check the arguments of each processed case. - (@list - () - ($($head:tt)*) - ) => { - crossbeam_channel_internal!( - @case - ($($head)*) - () - () - ) - }; - // If necessary, insert an empty argument list after `default`. - (@list - (default => $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_internal!( - @list - (default() => $($tail)*) - ($($head)*) - ) - }; - // But print an error if `default` is followed by a `->`. - (@list - (default -> $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_delegate!(compile_error( - "expected `=>` after `default` case, found `->`" - )) - }; - // Print an error if there's an `->` after the argument list in the default case. 
- (@list - (default $args:tt -> $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_delegate!(compile_error( - "expected `=>` after `default` case, found `->`" - )) - }; - // Print an error if there is a missing result in a recv case. - (@list - (recv($($args:tt)*) => $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_delegate!(compile_error( - "expected `->` after `recv` case, found `=>`" - )) - }; - // Print an error if there is a missing result in a send case. - (@list - (send($($args:tt)*) => $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_delegate!(compile_error( - "expected `->` after `send` operation, found `=>`" - )) - }; - // Make sure the arrow and the result are not repeated. - (@list - ($case:ident $args:tt -> $res:tt -> $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_delegate!(compile_error("expected `=>`, found `->`")) - }; - // Print an error if there is a semicolon after the block. - (@list - ($case:ident $args:tt $(-> $res:pat)* => $body:block; $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_delegate!(compile_error( - "did you mean to put a comma instead of the semicolon after `}`?" - )) - }; - // The first case is separated by a comma. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr, $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_internal!( - @list - ($($tail)*) - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Don't require a comma after the case if it has a proper block. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:block $($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_internal!( - @list - ($($tail)*) - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Only one case remains. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr) - ($($head:tt)*) - ) => { - crossbeam_channel_internal!( - @list - () - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Accept a trailing comma at the end of the list. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr,) - ($($head:tt)*) - ) => { - crossbeam_channel_internal!( - @list - () - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Diagnose and print an error. - (@list - ($($tail:tt)*) - ($($head:tt)*) - ) => { - crossbeam_channel_internal!(@list_error1 $($tail)*) - }; - // Stage 1: check the case type. - (@list_error1 recv $($tail:tt)*) => { - crossbeam_channel_internal!(@list_error2 recv $($tail)*) - }; - (@list_error1 send $($tail:tt)*) => { - crossbeam_channel_internal!(@list_error2 send $($tail)*) - }; - (@list_error1 default $($tail:tt)*) => { - crossbeam_channel_internal!(@list_error2 default $($tail)*) - }; - (@list_error1 $t:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected one of `recv`, `send`, or `default`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - (@list_error1 $($tail:tt)*) => { - crossbeam_channel_internal!(@list_error2 $($tail)*); - }; - // Stage 2: check the argument list. 
- (@list_error2 $case:ident) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "missing argument list after `", - crossbeam_channel_delegate!(stringify($case)), - "`", - )) - )) - }; - (@list_error2 $case:ident => $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "missing argument list after `", - crossbeam_channel_delegate!(stringify($case)), - "`", - )) - )) - }; - (@list_error2 $($tail:tt)*) => { - crossbeam_channel_internal!(@list_error3 $($tail)*) - }; - // Stage 3: check the `=>` and what comes after it. - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "missing `=>` after `", - crossbeam_channel_delegate!(stringify($case)), - "` case", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* =>) => { - crossbeam_channel_delegate!(compile_error( - "expected expression after `=>`" - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $body:expr; $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "did you mean to put a comma instead of the semicolon after `", - crossbeam_channel_delegate!(stringify($body)), - "`?", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => recv($($a:tt)*) $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - "expected an expression after `=>`" - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => send($($a:tt)*) $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - "expected an expression after `=>`" - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => default($($a:tt)*) $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - "expected an expression after `=>`" - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident($($a:tt)*) $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "did you mean to put a comma after `", - crossbeam_channel_delegate!(stringify($f)), - "(", - crossbeam_channel_delegate!(stringify($($a)*)), - ")`?", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident!($($a:tt)*) $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "did you mean to put a comma after `", - crossbeam_channel_delegate!(stringify($f)), - "!(", - crossbeam_channel_delegate!(stringify($($a)*)), - ")`?", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident![$($a:tt)*] $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "did you mean to put a comma after `", - crossbeam_channel_delegate!(stringify($f)), - "![", - crossbeam_channel_delegate!(stringify($($a)*)), - "]`?", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident!{$($a:tt)*} $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "did you mean to put a comma after `", - crossbeam_channel_delegate!(stringify($f)), - "!{", - crossbeam_channel_delegate!(stringify($($a)*)), - "}`?", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $body:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "did you mean to put a comma after `", - crossbeam_channel_delegate!(stringify($body)), - 
"`?", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) -> => $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error("missing pattern after `->`")) - }; - (@list_error3 $case:ident($($args:tt)*) $t:tt $(-> $r:pat)* => $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected `->`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - (@list_error3 $case:ident($($args:tt)*) -> $t:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected a pattern, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - (@list_error3 recv($($args:tt)*) $t:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected `->`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - (@list_error3 send($($args:tt)*) $t:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected `->`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - (@list_error3 recv $args:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected an argument list after `recv`, found `", - crossbeam_channel_delegate!(stringify($args)), - "`", - )) - )) - }; - (@list_error3 send $args:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected an argument list after `send`, found `", - crossbeam_channel_delegate!(stringify($args)), - "`", - )) - )) - }; - (@list_error3 default $args:tt $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected an argument list or `=>` after `default`, found `", - crossbeam_channel_delegate!(stringify($args)), - "`", - )) - )) - }; - (@list_error3 $($tail:tt)*) => { - crossbeam_channel_internal!(@list_error4 $($tail)*) - }; - // Stage 4: fail with a generic error message. - (@list_error4 $($tail:tt)*) => { - crossbeam_channel_delegate!(compile_error("invalid syntax")) - }; - - // Success! All cases were parsed. - (@case - () - $cases:tt - $default:tt - ) => { - crossbeam_channel_internal!( - @init - $cases - $default - ) - }; - - // Check the format of a recv case. - (@case - (recv($r:expr) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - ($($cases)* recv($r) -> $res => $body,) - $default - ) - }; - // Allow trailing comma... - (@case - (recv($r:expr,) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - ($($cases)* recv($r) -> $res => $body,) - $default - ) - }; - // Print an error if the argument list is invalid. - (@case - (recv($($args:tt)*) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "invalid argument list in `recv(", - crossbeam_channel_delegate!(stringify($($args)*)), - ")`", - )) - )) - }; - // Print an error if there is no argument list. 
- (@case - (recv $t:tt $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected an argument list after `recv`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - - // Check the format of a send case. - (@case - (send($s:expr, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - ($($cases)* send($s, $m) -> $res => $body,) - $default - ) - }; - // Allow trailing comma... - (@case - (send($s:expr, $m:expr,) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - ($($cases)* send($s, $m) -> $res => $body,) - $default - ) - }; - // Print an error if the argument list is invalid. - (@case - (send($($args:tt)*) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "invalid argument list in `send(", - crossbeam_channel_delegate!(stringify($($args)*)), - ")`", - )) - )) - }; - // Print an error if there is no argument list. - (@case - (send $t:tt $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected an argument list after `send`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - - // Check the format of a default case. - (@case - (default() => $body:tt, $($tail:tt)*) - $cases:tt - () - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - $cases - (default() => $body,) - ) - }; - // Check the format of a default case with timeout. - (@case - (default($timeout:expr) => $body:tt, $($tail:tt)*) - $cases:tt - () - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - $cases - (default($timeout) => $body,) - ) - }; - // Allow trailing comma... - (@case - (default($timeout:expr,) => $body:tt, $($tail:tt)*) - $cases:tt - () - ) => { - crossbeam_channel_internal!( - @case - ($($tail)*) - $cases - (default($timeout) => $body,) - ) - }; - // Check for duplicate default cases... - (@case - (default $($tail:tt)*) - $cases:tt - ($($def:tt)+) - ) => { - crossbeam_channel_delegate!(compile_error( - "there can be only one `default` case in a `select!` block" - )) - }; - // Print an error if the argument list is invalid. - (@case - (default($($args:tt)*) => $body:tt, $($tail:tt)*) - $cases:tt - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "invalid argument list in `default(", - crossbeam_channel_delegate!(stringify($($args)*)), - ")`", - )) - )) - }; - // Print an error if there is an unexpected token after `default`. - (@case - (default $t:tt $($tail:tt)*) - $cases:tt - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected an argument list or `=>` after `default`, found `", - crossbeam_channel_delegate!(stringify($t)), - "`", - )) - )) - }; - - // The case was not consumed, therefore it must be invalid. - (@case - ($case:ident $($tail:tt)*) - $cases:tt - $default:tt - ) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "expected one of `recv`, `send`, or `default`, found `", - crossbeam_channel_delegate!(stringify($case)), - "`", - )) - )) - }; - - // Optimize `select!` into `try_recv()`. 
- (@init - (recv($r:expr) -> $res:pat => $recv_body:tt,) - (default() => $default_body:tt,) - ) => {{ - match $r { - ref _r => { - let _r: &$crate::Receiver<_> = _r; - match _r.try_recv() { - ::std::result::Result::Err($crate::TryRecvError::Empty) => { - $default_body - } - _res => { - let _res = _res.map_err(|_| $crate::RecvError); - let $res = _res; - $recv_body - } - } - } - } - }}; - // Optimize `select!` into `recv()`. - (@init - (recv($r:expr) -> $res:pat => $body:tt,) - () - ) => {{ - match $r { - ref _r => { - let _r: &$crate::Receiver<_> = _r; - let _res = _r.recv(); - let $res = _res; - $body - } - } - }}; - // Optimize `select!` into `recv_timeout()`. - (@init - (recv($r:expr) -> $res:pat => $recv_body:tt,) - (default($timeout:expr) => $default_body:tt,) - ) => {{ - match $r { - ref _r => { - let _r: &$crate::Receiver<_> = _r; - match _r.recv_timeout($timeout) { - ::std::result::Result::Err($crate::RecvTimeoutError::Timeout) => { - $default_body - } - _res => { - let _res = _res.map_err(|_| $crate::RecvError); - let $res = _res; - $recv_body - } - } - } - } - }}; - - // // Optimize the non-blocking case with two receive operations. - // (@init - // (recv($r1:expr) -> $res1:pat => $recv_body1:tt,) - // (recv($r2:expr) -> $res2:pat => $recv_body2:tt,) - // (default() => $default_body:tt,) - // ) => {{ - // match $r1 { - // ref _r1 => { - // let _r1: &$crate::Receiver<_> = _r1; - // - // match $r2 { - // ref _r2 => { - // let _r2: &$crate::Receiver<_> = _r2; - // - // // TODO(stjepang): Implement this optimization. - // } - // } - // } - // } - // }}; - // // Optimize the blocking case with two receive operations. - // (@init - // (recv($r1:expr) -> $res1:pat => $body1:tt,) - // (recv($r2:expr) -> $res2:pat => $body2:tt,) - // () - // ) => {{ - // match $r1 { - // ref _r1 => { - // let _r1: &$crate::Receiver<_> = _r1; - // - // match $r2 { - // ref _r2 => { - // let _r2: &$crate::Receiver<_> = _r2; - // - // // TODO(stjepang): Implement this optimization. - // } - // } - // } - // } - // }}; - // // Optimize the case with two receive operations and a timeout. - // (@init - // (recv($r1:expr) -> $res1:pat => $recv_body1:tt,) - // (recv($r2:expr) -> $res2:pat => $recv_body2:tt,) - // (default($timeout:expr) => $default_body:tt,) - // ) => {{ - // match $r1 { - // ref _r1 => { - // let _r1: &$crate::Receiver<_> = _r1; - // - // match $r2 { - // ref _r2 => { - // let _r2: &$crate::Receiver<_> = _r2; - // - // // TODO(stjepang): Implement this optimization. - // } - // } - // } - // } - // }}; - - // // Optimize `select!` into `try_send()`. - // (@init - // (send($s:expr, $m:expr) -> $res:pat => $send_body:tt,) - // (default() => $default_body:tt,) - // ) => {{ - // match $s { - // ref _s => { - // let _s: &$crate::Sender<_> = _s; - // // TODO(stjepang): Implement this optimization. - // } - // } - // }}; - // // Optimize `select!` into `send()`. - // (@init - // (send($s:expr, $m:expr) -> $res:pat => $body:tt,) - // () - // ) => {{ - // match $s { - // ref _s => { - // let _s: &$crate::Sender<_> = _s; - // // TODO(stjepang): Implement this optimization. - // } - // } - // }}; - // // Optimize `select!` into `send_timeout()`. - // (@init - // (send($s:expr, $m:expr) -> $res:pat => $body:tt,) - // (default($timeout:expr) => $body:tt,) - // ) => {{ - // match $s { - // ref _s => { - // let _s: &$crate::Sender<_> = _s; - // // TODO(stjepang): Implement this optimization. - // } - // } - // }}; - - // Create the list of handles and add operations to it. 
- (@init - ($($cases:tt)*) - $default:tt - ) => {{ - const _LEN: usize = crossbeam_channel_internal!(@count ($($cases)*)); - let _handle: &$crate::internal::SelectHandle = &$crate::never::<()>(); - - #[allow(unused_mut)] - let mut _sel = [(_handle, 0, ::std::ptr::null()); _LEN]; - - crossbeam_channel_internal!( - @add - _sel - ($($cases)*) - $default - ( - (0usize _oper0) - (1usize _oper1) - (2usize _oper2) - (3usize _oper3) - (4usize _oper4) - (5usize _oper5) - (6usize _oper6) - (7usize _oper7) - (8usize _oper8) - (9usize _oper9) - (10usize _oper10) - (11usize _oper11) - (12usize _oper12) - (13usize _oper13) - (14usize _oper14) - (15usize _oper15) - (16usize _oper16) - (17usize _oper17) - (18usize _oper18) - (19usize _oper19) - (20usize _oper20) - (21usize _oper21) - (22usize _oper22) - (23usize _oper23) - (24usize _oper24) - (25usize _oper25) - (26usize _oper26) - (27usize _oper27) - (28usize _oper28) - (29usize _oper29) - (30usize _oper30) - (31usize _oper31) - ) - () - ) - }}; - - // Count the listed cases. - (@count ()) => { - 0 - }; - (@count ($oper:ident $args:tt -> $res:pat => $body:tt, $($cases:tt)*)) => { - 1 + crossbeam_channel_internal!(@count ($($cases)*)) - }; - - // Run blocking selection. - (@add - $sel:ident - () - () - $labels:tt - $cases:tt - ) => {{ - let _oper: $crate::SelectedOperation<'_> = { - let _oper = $crate::internal::select(&mut $sel); - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - #[allow(unsafe_code)] - unsafe { ::std::mem::transmute(_oper) } - }; - - crossbeam_channel_internal! { - @complete - $sel - _oper - $cases - } - }}; - // Run non-blocking selection. - (@add - $sel:ident - () - (default() => $body:tt,) - $labels:tt - $cases:tt - ) => {{ - let _oper: ::std::option::Option<$crate::SelectedOperation<'_>> = { - let _oper = $crate::internal::try_select(&mut $sel); - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - #[allow(unsafe_code)] - unsafe { ::std::mem::transmute(_oper) } - }; - - match _oper { - None => { - { $sel }; - $body - } - Some(_oper) => { - crossbeam_channel_internal! { - @complete - $sel - _oper - $cases - } - } - } - }}; - // Run selection with a timeout. - (@add - $sel:ident - () - (default($timeout:expr) => $body:tt,) - $labels:tt - $cases:tt - ) => {{ - let _oper: ::std::option::Option<$crate::SelectedOperation<'_>> = { - let _oper = $crate::internal::select_timeout(&mut $sel, $timeout); - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - #[allow(unsafe_code)] - unsafe { ::std::mem::transmute(_oper) } - }; - - match _oper { - ::std::option::Option::None => { - { $sel }; - $body - } - ::std::option::Option::Some(_oper) => { - crossbeam_channel_internal! { - @complete - $sel - _oper - $cases - } - } - } - }}; - // Have we used up all labels? - (@add - $sel:ident - $input:tt - $default:tt - () - $cases:tt - ) => { - crossbeam_channel_delegate!(compile_error("too many operations in a `select!` block")) - }; - // Add a receive operation to `sel`. - (@add - $sel:ident - (recv($r:expr) -> $res:pat => $body:tt, $($tail:tt)*) - $default:tt - (($i:tt $var:ident) $($labels:tt)*) - ($($cases:tt)*) - ) => {{ - match $r { - ref _r => { - #[allow(unsafe_code)] - let $var: &$crate::Receiver<_> = unsafe { - let _r: &$crate::Receiver<_> = _r; - - // Erase the lifetime so that `sel` can be dropped early even without NLL. 
- unsafe fn unbind<'a, T>(x: &T) -> &'a T { - ::std::mem::transmute(x) - } - unbind(_r) - }; - $sel[$i] = ($var, $i, $var as *const $crate::Receiver<_> as *const u8); - - crossbeam_channel_internal!( - @add - $sel - ($($tail)*) - $default - ($($labels)*) - ($($cases)* [$i] recv($var) -> $res => $body,) - ) - } - } - }}; - // Add a send operation to `sel`. - (@add - $sel:ident - (send($s:expr, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) - $default:tt - (($i:tt $var:ident) $($labels:tt)*) - ($($cases:tt)*) - ) => {{ - match $s { - ref _s => { - #[allow(unsafe_code)] - let $var: &$crate::Sender<_> = unsafe { - let _s: &$crate::Sender<_> = _s; - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - unsafe fn unbind<'a, T>(x: &T) -> &'a T { - ::std::mem::transmute(x) - } - unbind(_s) - }; - $sel[$i] = ($var, $i, $var as *const $crate::Sender<_> as *const u8); - - crossbeam_channel_internal!( - @add - $sel - ($($tail)*) - $default - ($($labels)*) - ($($cases)* [$i] send($var, $m) -> $res => $body,) - ) - } - } - }}; - - // Complete a receive operation. - (@complete - $sel:ident - $oper:ident - ([$i:tt] recv($r:ident) -> $res:pat => $body:tt, $($tail:tt)*) - ) => {{ - if $oper.index() == $i { - let _res = $oper.recv($r); - { $sel }; - - let $res = _res; - $body - } else { - crossbeam_channel_internal! { - @complete - $sel - $oper - ($($tail)*) - } - } - }}; - // Complete a send operation. - (@complete - $sel:ident - $oper:ident - ([$i:tt] send($s:ident, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) - ) => {{ - if $oper.index() == $i { - let _res = $oper.send($s, $m); - { $sel }; - - let $res = _res; - $body - } else { - crossbeam_channel_internal! { - @complete - $sel - $oper - ($($tail)*) - } - } - }}; - // Panic if we don't identify the selected case, but this should never happen. - (@complete - $sel:ident - $oper:ident - () - ) => {{ - crossbeam_channel_delegate!(unreachable( - "internal error in crossbeam-channel: invalid case" - )) - }}; - - // Catches a bug within this macro (should not happen). - (@$($tokens:tt)*) => { - crossbeam_channel_delegate!(compile_error( - crossbeam_channel_delegate!(concat( - "internal error in crossbeam-channel: ", - crossbeam_channel_delegate!(stringify(@$($tokens)*)), - )) - )) - }; - - // The entry points. - () => { - crossbeam_channel_delegate!(compile_error("empty `select!` block")) - }; - ($($case:ident $(($($args:tt)*))* => $body:expr $(,)*)*) => { - crossbeam_channel_internal!( - @list - ($($case $(($($args)*))* => { $body },)*) - () - ) - }; - ($($tokens:tt)*) => { - crossbeam_channel_internal!( - @list - ($($tokens)*) - () - ) - }; -} - -/// Selects from a set of channel operations. -/// -/// This macro allows you to define a set of channel operations, wait until any one of them becomes -/// ready, and finally execute it. If multiple operations are ready at the same time, a random one -/// among them is selected. -/// -/// It is also possible to define a `default` case that gets executed if none of the operations are -/// ready, either right away or for a certain duration of time. -/// -/// An operation is considered to be ready if it doesn't have to block. Note that it is ready even -/// when it will simply return an error because the channel is disconnected. -/// -/// The `select` macro is a convenience wrapper around [`Select`]. However, it cannot select over a -/// dynamically created list of channel operations. 
-/// -/// [`Select`]: struct.Select.html -/// -/// # Examples -/// -/// Block until a send or a receive operation is selected: -/// -/// ``` -/// # #[macro_use] -/// # extern crate crossbeam_channel; -/// # fn main() { -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// s1.send(10).unwrap(); -/// -/// // Since both operations are initially ready, a random one will be executed. -/// select! { -/// recv(r1) -> msg => assert_eq!(msg, Ok(10)), -/// send(s2, 20) -> res => { -/// assert_eq!(res, Ok(())); -/// assert_eq!(r2.recv(), Ok(20)); -/// } -/// } -/// # } -/// ``` -/// -/// Select from a set of operations without blocking: -/// -/// ``` -/// # #[macro_use] -/// # extern crate crossbeam_channel; -/// # fn main() { -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::unbounded; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s1.send(10).unwrap(); -/// }); -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// s2.send(20).unwrap(); -/// }); -/// -/// // None of the operations are initially ready. -/// select! { -/// recv(r1) -> msg => panic!(), -/// recv(r2) -> msg => panic!(), -/// default => println!("not ready"), -/// } -/// # } -/// ``` -/// -/// Select over a set of operations with a timeout: -/// -/// ``` -/// # #[macro_use] -/// # extern crate crossbeam_channel; -/// # fn main() { -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::unbounded; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s1.send(10).unwrap(); -/// }); -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// s2.send(20).unwrap(); -/// }); -/// -/// // None of the two operations will become ready within 100 milliseconds. -/// select! { -/// recv(r1) -> msg => panic!(), -/// recv(r2) -> msg => panic!(), -/// default(Duration::from_millis(100)) => println!("timed out"), -/// } -/// # } -/// ``` -/// -/// Optionally add a receive operation to `select!` using [`never`]: -/// -/// ``` -/// # #[macro_use] -/// # extern crate crossbeam_channel; -/// # fn main() { -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::{never, unbounded}; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s1.send(10).unwrap(); -/// }); -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// s2.send(20).unwrap(); -/// }); -/// -/// // This receiver can be a `Some` or a `None`. -/// let r2 = Some(&r2); -/// -/// // None of the two operations will become ready within 100 milliseconds. -/// select! { -/// recv(r1) -> msg => panic!(), -/// recv(r2.unwrap_or(&never())) -> msg => assert_eq!(msg, Ok(20)), -/// } -/// # } -/// ``` -/// -/// To optionally add a timeout to `select!`, see the [example] for [`never`]. -/// -/// [`never`]: fn.never.html -/// [example]: fn.never.html#examples -#[macro_export(local_inner_macros)] -macro_rules! 
select { - ($($tokens:tt)*) => { - crossbeam_channel_internal!( - $($tokens)* - ) - }; -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/select.rs cargo-0.47.0/vendor/crossbeam-channel/src/select.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/select.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/select.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1166 +0,0 @@ -//! Interface to the select mechanism. - -use std::fmt; -use std::marker::PhantomData; -use std::mem; -use std::time::{Duration, Instant}; - -use crossbeam_utils::Backoff; - -use channel::{self, Receiver, Sender}; -use context::Context; -use err::{ReadyTimeoutError, TryReadyError}; -use err::{RecvError, SendError}; -use err::{SelectTimeoutError, TrySelectError}; -use flavors; -use utils; - -/// Temporary data that gets initialized during select or a blocking operation, and is consumed by -/// `read` or `write`. -/// -/// Each field contains data associated with a specific channel flavor. -#[derive(Debug, Default)] -pub struct Token { - pub after: flavors::after::AfterToken, - pub array: flavors::array::ArrayToken, - pub list: flavors::list::ListToken, - pub never: flavors::never::NeverToken, - pub tick: flavors::tick::TickToken, - pub zero: flavors::zero::ZeroToken, -} - -/// Identifier associated with an operation by a specific thread on a specific channel. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Operation(usize); - -impl Operation { - /// Creates an operation identifier from a mutable reference. - /// - /// This function essentially just turns the address of the reference into a number. The - /// reference should point to a variable that is specific to the thread and the operation, - /// and is alive for the entire duration of select or blocking operation. - #[inline] - pub fn hook(r: &mut T) -> Operation { - let val = r as *mut T as usize; - // Make sure that the pointer address doesn't equal the numerical representation of - // `Selected::{Waiting, Aborted, Disconnected}`. - assert!(val > 2); - Operation(val) - } -} - -/// Current state of a select or a blocking operation. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Selected { - /// Still waiting for an operation. - Waiting, - - /// The attempt to block the current thread has been aborted. - Aborted, - - /// An operation became ready because a channel is disconnected. - Disconnected, - - /// An operation became ready because a message can be sent or received. - Operation(Operation), -} - -impl From for Selected { - #[inline] - fn from(val: usize) -> Selected { - match val { - 0 => Selected::Waiting, - 1 => Selected::Aborted, - 2 => Selected::Disconnected, - oper => Selected::Operation(Operation(oper)), - } - } -} - -impl Into for Selected { - #[inline] - fn into(self) -> usize { - match self { - Selected::Waiting => 0, - Selected::Aborted => 1, - Selected::Disconnected => 2, - Selected::Operation(Operation(val)) => val, - } - } -} - -/// A receiver or a sender that can participate in select. -/// -/// This is a handle that assists select in executing an operation, registration, deciding on the -/// appropriate deadline for blocking, etc. -pub trait SelectHandle { - /// Attempts to select an operation and returns `true` on success. - fn try_select(&self, token: &mut Token) -> bool; - - /// Returns a deadline for an operation, if there is one. - fn deadline(&self) -> Option; - - /// Registers an operation for execution and returns `true` if it is now ready. 
- fn register(&self, oper: Operation, cx: &Context) -> bool; - - /// Unregisters an operation for execution. - fn unregister(&self, oper: Operation); - - /// Attempts to select an operation the thread got woken up for and returns `true` on success. - fn accept(&self, token: &mut Token, cx: &Context) -> bool; - - /// Returns `true` if an operation can be executed without blocking. - fn is_ready(&self) -> bool; - - /// Registers an operation for readiness notification and returns `true` if it is now ready. - fn watch(&self, oper: Operation, cx: &Context) -> bool; - - /// Unregisters an operation for readiness notification. - fn unwatch(&self, oper: Operation); -} - -impl<'a, T: SelectHandle> SelectHandle for &'a T { - fn try_select(&self, token: &mut Token) -> bool { - (**self).try_select(token) - } - - fn deadline(&self) -> Option { - (**self).deadline() - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - (**self).register(oper, cx) - } - - fn unregister(&self, oper: Operation) { - (**self).unregister(oper); - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - (**self).accept(token, cx) - } - - fn is_ready(&self) -> bool { - (**self).is_ready() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - (**self).watch(oper, cx) - } - - fn unwatch(&self, oper: Operation) { - (**self).unwatch(oper) - } -} - -/// Determines when a select operation should time out. -#[derive(Clone, Copy, Eq, PartialEq)] -enum Timeout { - /// No blocking. - Now, - - /// Block forever. - Never, - - /// Time out after the time instant. - At(Instant), -} - -/// Runs until one of the operations is selected, potentially blocking the current thread. -/// -/// Successful receive operations will have to be followed up by `channel::read()` and successful -/// send operations by `channel::write()`. -fn run_select( - handles: &mut [(&dyn SelectHandle, usize, *const u8)], - timeout: Timeout, -) -> Option<(Token, usize, *const u8)> { - if handles.is_empty() { - // Wait until the timeout and return. - match timeout { - Timeout::Now => return None, - Timeout::Never => { - utils::sleep_until(None); - unreachable!(); - } - Timeout::At(when) => { - utils::sleep_until(Some(when)); - return None; - } - } - } - - // Shuffle the operations for fairness. - utils::shuffle(handles); - - // Create a token, which serves as a temporary variable that gets initialized in this function - // and is later used by a call to `channel::read()` or `channel::write()` that completes the - // selected operation. - let mut token = Token::default(); - - // Try selecting one of the operations without blocking. - for &(handle, i, ptr) in handles.iter() { - if handle.try_select(&mut token) { - return Some((token, i, ptr)); - } - } - - loop { - // Prepare for blocking. - let res = Context::with(|cx| { - let mut sel = Selected::Waiting; - let mut registered_count = 0; - let mut index_ready = None; - - if let Timeout::Now = timeout { - cx.try_select(Selected::Aborted).unwrap(); - } - - // Register all operations. - for (handle, i, _) in handles.iter_mut() { - registered_count += 1; - - // If registration returns `false`, that means the operation has just become ready. - if handle.register(Operation::hook::<&dyn SelectHandle>(handle), cx) { - // Try aborting select. - sel = match cx.try_select(Selected::Aborted) { - Ok(()) => { - index_ready = Some(*i); - Selected::Aborted - } - Err(s) => s, - }; - break; - } - - // If another thread has already selected one of the operations, stop registration. 
- sel = cx.selected(); - if sel != Selected::Waiting { - break; - } - } - - if sel == Selected::Waiting { - // Check with each operation for how long we're allowed to block, and compute the - // earliest deadline. - let mut deadline: Option = match timeout { - Timeout::Now => return None, - Timeout::Never => None, - Timeout::At(when) => Some(when), - }; - for &(handle, _, _) in handles.iter() { - if let Some(x) = handle.deadline() { - deadline = deadline.map(|y| x.min(y)).or(Some(x)); - } - } - - // Block the current thread. - sel = cx.wait_until(deadline); - } - - // Unregister all registered operations. - for (handle, _, _) in handles.iter_mut().take(registered_count) { - handle.unregister(Operation::hook::<&dyn SelectHandle>(handle)); - } - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => { - // If an operation became ready during registration, try selecting it. - if let Some(index_ready) = index_ready { - for &(handle, i, ptr) in handles.iter() { - if i == index_ready && handle.try_select(&mut token) { - return Some((i, ptr)); - } - } - } - } - Selected::Disconnected => {} - Selected::Operation(_) => { - // Find the selected operation. - for (handle, i, ptr) in handles.iter_mut() { - // Is this the selected operation? - if sel == Selected::Operation(Operation::hook::<&dyn SelectHandle>(handle)) - { - // Try selecting this operation. - if handle.accept(&mut token, cx) { - return Some((*i, *ptr)); - } - } - } - } - } - - None - }); - - // Return if an operation was selected. - if let Some((i, ptr)) = res { - return Some((token, i, ptr)); - } - - // Try selecting one of the operations without blocking. - for &(handle, i, ptr) in handles.iter() { - if handle.try_select(&mut token) { - return Some((token, i, ptr)); - } - } - - match timeout { - Timeout::Now => return None, - Timeout::Never => {} - Timeout::At(when) => { - if Instant::now() >= when { - return None; - } - } - } - } -} - -/// Runs until one of the operations becomes ready, potentially blocking the current thread. -fn run_ready( - handles: &mut [(&dyn SelectHandle, usize, *const u8)], - timeout: Timeout, -) -> Option { - if handles.is_empty() { - // Wait until the timeout and return. - match timeout { - Timeout::Now => return None, - Timeout::Never => { - utils::sleep_until(None); - unreachable!(); - } - Timeout::At(when) => { - utils::sleep_until(Some(when)); - return None; - } - } - } - - // Shuffle the operations for fairness. - utils::shuffle(handles); - - loop { - let backoff = Backoff::new(); - loop { - // Check operations for readiness. - for &(handle, i, _) in handles.iter() { - if handle.is_ready() { - return Some(i); - } - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - // Check for timeout. - match timeout { - Timeout::Now => return None, - Timeout::Never => {} - Timeout::At(when) => { - if Instant::now() >= when { - return None; - } - } - } - - // Prepare for blocking. - let res = Context::with(|cx| { - let mut sel = Selected::Waiting; - let mut registered_count = 0; - - // Begin watching all operations. - for (handle, _, _) in handles.iter_mut() { - registered_count += 1; - let oper = Operation::hook::<&dyn SelectHandle>(handle); - - // If registration returns `false`, that means the operation has just become ready. 
- if handle.watch(oper, cx) { - sel = match cx.try_select(Selected::Operation(oper)) { - Ok(()) => Selected::Operation(oper), - Err(s) => s, - }; - break; - } - - // If another thread has already chosen one of the operations, stop registration. - sel = cx.selected(); - if sel != Selected::Waiting { - break; - } - } - - if sel == Selected::Waiting { - // Check with each operation for how long we're allowed to block, and compute the - // earliest deadline. - let mut deadline: Option = match timeout { - Timeout::Now => unreachable!(), - Timeout::Never => None, - Timeout::At(when) => Some(when), - }; - for &(handle, _, _) in handles.iter() { - if let Some(x) = handle.deadline() { - deadline = deadline.map(|y| x.min(y)).or(Some(x)); - } - } - - // Block the current thread. - sel = cx.wait_until(deadline); - } - - // Unwatch all operations. - for (handle, _, _) in handles.iter_mut().take(registered_count) { - handle.unwatch(Operation::hook::<&dyn SelectHandle>(handle)); - } - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => {} - Selected::Disconnected => {} - Selected::Operation(_) => { - for (handle, i, _) in handles.iter_mut() { - let oper = Operation::hook::<&dyn SelectHandle>(handle); - if sel == Selected::Operation(oper) { - return Some(*i); - } - } - } - } - - None - }); - - // Return if an operation became ready. - if res.is_some() { - return res; - } - } -} - -/// Attempts to select one of the operations without blocking. -#[inline] -pub fn try_select<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], -) -> Result, TrySelectError> { - match run_select(handles, Timeout::Now) { - None => Err(TrySelectError), - Some((token, index, ptr)) => Ok(SelectedOperation { - token, - index, - ptr, - _marker: PhantomData, - }), - } -} - -/// Blocks until one of the operations becomes ready and selects it. -#[inline] -pub fn select<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], -) -> SelectedOperation<'a> { - if handles.is_empty() { - panic!("no operations have been added to `Select`"); - } - - let (token, index, ptr) = run_select(handles, Timeout::Never).unwrap(); - SelectedOperation { - token, - index, - ptr, - _marker: PhantomData, - } -} - -/// Blocks for a limited time until one of the operations becomes ready and selects it. -#[inline] -pub fn select_timeout<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], - timeout: Duration, -) -> Result, SelectTimeoutError> { - let timeout = Timeout::At(Instant::now() + timeout); - - match run_select(handles, timeout) { - None => Err(SelectTimeoutError), - Some((token, index, ptr)) => Ok(SelectedOperation { - token, - index, - ptr, - _marker: PhantomData, - }), - } -} - -/// Selects from a set of channel operations. -/// -/// `Select` allows you to define a set of channel operations, wait until any one of them becomes -/// ready, and finally execute it. If multiple operations are ready at the same time, a random one -/// among them is selected. -/// -/// An operation is considered to be ready if it doesn't have to block. Note that it is ready even -/// when it will simply return an error because the channel is disconnected. -/// -/// The [`select!`] macro is a convenience wrapper around `Select`. However, it cannot select over a -/// dynamically created list of channel operations. -/// -/// Once a list of operations has been built with `Select`, there are two different ways of -/// proceeding: -/// -/// * Select an operation with [`try_select`], [`select`], or [`select_timeout`]. 
If successful, -/// the returned selected operation has already begun and **must** be completed. If we don't -/// complete it, a panic will occur. -/// -/// * Wait for an operation to become ready with [`try_ready`], [`ready`], or [`ready_timeout`]. If -/// successful, we may attempt to execute the operation, but are not obliged to. In fact, it's -/// possible for another thread to make the operation not ready just before we try executing it, -/// so it's wise to use a retry loop. However, note that these methods might return with success -/// spuriously, so it's a good idea to always double check if the operation is really ready. -/// -/// # Examples -/// -/// Use [`select`] to receive a message from a list of receivers: -/// -/// ``` -/// use crossbeam_channel::{Receiver, RecvError, Select}; -/// -/// fn recv_multiple(rs: &[Receiver]) -> Result { -/// // Build a list of operations. -/// let mut sel = Select::new(); -/// for r in rs { -/// sel.recv(r); -/// } -/// -/// // Complete the selected operation. -/// let oper = sel.select(); -/// let index = oper.index(); -/// oper.recv(&rs[index]) -/// } -/// ``` -/// -/// Use [`ready`] to receive a message from a list of receivers: -/// -/// ``` -/// use crossbeam_channel::{Receiver, RecvError, Select}; -/// -/// fn recv_multiple(rs: &[Receiver]) -> Result { -/// // Build a list of operations. -/// let mut sel = Select::new(); -/// for r in rs { -/// sel.recv(r); -/// } -/// -/// loop { -/// // Wait until a receive operation becomes ready and try executing it. -/// let index = sel.ready(); -/// let res = rs[index].try_recv(); -/// -/// // If the operation turns out not to be ready, retry. -/// if let Err(e) = res { -/// if e.is_empty() { -/// continue; -/// } -/// } -/// -/// // Success! -/// return res.map_err(|_| RecvError); -/// } -/// } -/// ``` -/// -/// [`select!`]: macro.select.html -/// [`try_select`]: struct.Select.html#method.try_select -/// [`select`]: struct.Select.html#method.select -/// [`select_timeout`]: struct.Select.html#method.select_timeout -/// [`try_ready`]: struct.Select.html#method.try_ready -/// [`ready`]: struct.Select.html#method.ready -/// [`ready_timeout`]: struct.Select.html#method.ready_timeout -pub struct Select<'a> { - /// A list of senders and receivers participating in selection. - handles: Vec<(&'a dyn SelectHandle, usize, *const u8)>, - - /// The next index to assign to an operation. - next_index: usize, -} - -unsafe impl<'a> Send for Select<'a> {} -unsafe impl<'a> Sync for Select<'a> {} - -impl<'a> Select<'a> { - /// Creates an empty list of channel operations for selection. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::Select; - /// - /// let mut sel = Select::new(); - /// - /// // The list of operations is empty, which means no operation can be selected. - /// assert!(sel.try_select().is_err()); - /// ``` - pub fn new() -> Select<'a> { - Select { - handles: Vec::with_capacity(4), - next_index: 0, - } - } - - /// Adds a send operation. - /// - /// Returns the index of the added operation. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s, r) = unbounded::(); - /// - /// let mut sel = Select::new(); - /// let index = sel.send(&s); - /// ``` - pub fn send(&mut self, s: &'a Sender) -> usize { - let i = self.next_index; - let ptr = s as *const Sender<_> as *const u8; - self.handles.push((s, i, ptr)); - self.next_index += 1; - i - } - - /// Adds a receive operation. 
- /// - /// Returns the index of the added operation. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s, r) = unbounded::(); - /// - /// let mut sel = Select::new(); - /// let index = sel.recv(&r); - /// ``` - pub fn recv(&mut self, r: &'a Receiver) -> usize { - let i = self.next_index; - let ptr = r as *const Receiver<_> as *const u8; - self.handles.push((r, i, ptr)); - self.next_index += 1; - i - } - - /// Removes a previously added operation. - /// - /// This is useful when an operation is selected because the channel got disconnected and we - /// want to try again to select a different operation instead. - /// - /// If new operations are added after removing some, the indices of removed operations will not - /// be reused. - /// - /// # Panics - /// - /// An attempt to remove a non-existing or already removed operation will panic. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded::(); - /// let (_, r2) = unbounded::(); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // Both operations are initially ready, so a random one will be executed. - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper2); - /// assert!(oper.recv(&r2).is_err()); - /// sel.remove(oper2); - /// - /// s1.send(10).unwrap(); - /// - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper1); - /// assert_eq!(oper.recv(&r1), Ok(10)); - /// ``` - pub fn remove(&mut self, index: usize) { - assert!( - index < self.next_index, - "index out of bounds; {} >= {}", - index, - self.next_index, - ); - - let i = self - .handles - .iter() - .enumerate() - .find(|(_, (_, i, _))| *i == index) - .expect("no operation with this index") - .0; - - self.handles.swap_remove(i); - } - - /// Attempts to select one of the operations without blocking. - /// - /// If an operation is ready, it is selected and returned. If multiple operations are ready at - /// the same time, a random one among them is selected. If none of the operations are ready, an - /// error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. - /// - /// [`SelectedOperation::send`]: struct.SelectedOperation.html#method.send - /// [`SelectedOperation::recv`]: struct.SelectedOperation.html#method.recv - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// s1.send(10).unwrap(); - /// s2.send(20).unwrap(); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // Both operations are initially ready, so a random one will be executed. 
- /// let oper = sel.try_select(); - /// match oper { - /// Err(_) => panic!("both operations should be ready"), - /// Ok(oper) => match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// } - /// ``` - pub fn try_select(&mut self) -> Result, TrySelectError> { - try_select(&mut self.handles) - } - - /// Blocks until one of the operations becomes ready and selects it. - /// - /// Once an operation becomes ready, it is selected and returned. If multiple operations are - /// ready at the same time, a random one among them is selected. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. - /// - /// [`SelectedOperation::send`]: struct.SelectedOperation.html#method.send - /// [`SelectedOperation::recv`]: struct.SelectedOperation.html#method.recv - /// - /// # Panics - /// - /// Panics if no operations have been added to `Select`. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. - /// let oper = sel.select(); - /// match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// ``` - pub fn select(&mut self) -> SelectedOperation<'a> { - select(&mut self.handles) - } - - /// Blocks for a limited time until one of the operations becomes ready and selects it. - /// - /// If an operation becomes ready, it is selected and returned. If multiple operations are - /// ready at the same time, a random one among them is selected. If none of the operations - /// become ready for the specified duration, an error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. - /// - /// [`SelectedOperation::send`]: struct.SelectedOperation.html#method.send - /// [`SelectedOperation::recv`]: struct.SelectedOperation.html#method.recv - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. 
- /// let oper = sel.select_timeout(Duration::from_millis(500)); - /// match oper { - /// Err(_) => panic!("should not have timed out"), - /// Ok(oper) => match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// } - /// ``` - pub fn select_timeout( - &mut self, - timeout: Duration, - ) -> Result, SelectTimeoutError> { - select_timeout(&mut self.handles, timeout) - } - - /// Attempts to find a ready operation without blocking. - /// - /// If an operation is ready, its index is returned. If multiple operations are ready at the - /// same time, a random one among them is chosen. If none of the operations are ready, an error - /// is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// Note that this method might return with success spuriously, so it's a good idea to always - /// double check if the operation is really ready. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// s1.send(10).unwrap(); - /// s2.send(20).unwrap(); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // Both operations are initially ready, so a random one will be chosen. - /// match sel.try_ready() { - /// Err(_) => panic!("both operations should be ready"), - /// Ok(i) if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// Ok(i) if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// Ok(_) => unreachable!(), - /// } - /// ``` - pub fn try_ready(&mut self) -> Result { - match run_ready(&mut self.handles, Timeout::Now) { - None => Err(TryReadyError), - Some(index) => Ok(index), - } - } - - /// Blocks until one of the operations becomes ready. - /// - /// Once an operation becomes ready, its index is returned. If multiple operations are ready at - /// the same time, a random one among them is chosen. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// Note that this method might return with success spuriously, so it's a good idea to always - /// double check if the operation is really ready. - /// - /// # Panics - /// - /// Panics if no operations have been added to `Select`. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. 
- /// match sel.ready() { - /// i if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// i if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// _ => unreachable!(), - /// } - /// ``` - pub fn ready(&mut self) -> usize { - if self.handles.is_empty() { - panic!("no operations have been added to `Select`"); - } - - run_ready(&mut self.handles, Timeout::Never).unwrap() - } - - /// Blocks for a limited time until one of the operations becomes ready. - /// - /// If an operation becomes ready, its index is returned. If multiple operations are ready at - /// the same time, a random one among them is chosen. If none of the operations become ready - /// for the specified duration, an error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// Note that this method might return with success spuriously, so it's a good idea to double - /// check if the operation is really ready. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. - /// match sel.ready_timeout(Duration::from_millis(500)) { - /// Err(_) => panic!("should not have timed out"), - /// Ok(i) if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// Ok(i) if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// Ok(_) => unreachable!(), - /// } - /// ``` - pub fn ready_timeout(&mut self, timeout: Duration) -> Result { - let timeout = Timeout::At(Instant::now() + timeout); - - match run_ready(&mut self.handles, timeout) { - None => Err(ReadyTimeoutError), - Some(index) => Ok(index), - } - } -} - -impl<'a> Clone for Select<'a> { - fn clone(&self) -> Select<'a> { - Select { - handles: self.handles.clone(), - next_index: self.next_index, - } - } -} - -impl<'a> Default for Select<'a> { - fn default() -> Select<'a> { - Select::new() - } -} - -impl<'a> fmt::Debug for Select<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("Select { .. }") - } -} - -/// A selected operation that needs to be completed. -/// -/// To complete the operation, call [`send`] or [`recv`]. -/// -/// # Panics -/// -/// Forgetting to complete the operation is an error and might lead to deadlocks. If a -/// `SelectedOperation` is dropped without completion, a panic occurs. -/// -/// [`send`]: struct.SelectedOperation.html#method.send -/// [`recv`]: struct.SelectedOperation.html#method.recv -#[must_use] -pub struct SelectedOperation<'a> { - /// Token needed to complete the operation. - token: Token, - - /// The index of the selected operation. - index: usize, - - /// The address of the selected `Sender` or `Receiver`. - ptr: *const u8, - - /// Indicates that `Sender`s and `Receiver`s are borrowed. - _marker: PhantomData<&'a ()>, -} - -impl<'a> SelectedOperation<'a> { - /// Returns the index of the selected operation. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, Select}; - /// - /// let (s1, r1) = bounded::<()>(0); - /// let (s2, r2) = bounded::<()>(0); - /// let (s3, r3) = bounded::<()>(1); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.send(&s1); - /// let oper2 = sel.recv(&r2); - /// let oper3 = sel.send(&s3); - /// - /// // Only the last operation is ready. - /// let oper = sel.select(); - /// assert_eq!(oper.index(), 2); - /// assert_eq!(oper.index(), oper3); - /// - /// // Complete the operation. - /// oper.send(&s3, ()).unwrap(); - /// ``` - pub fn index(&self) -> usize { - self.index - } - - /// Completes the send operation. - /// - /// The passed [`Sender`] reference must be the same one that was used in [`Select::send`] - /// when the operation was added. - /// - /// # Panics - /// - /// Panics if an incorrect [`Sender`] reference is passed. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, Select, SendError}; - /// - /// let (s, r) = bounded::(0); - /// drop(r); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.send(&s); - /// - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper1); - /// assert_eq!(oper.send(&s, 10), Err(SendError(10))); - /// ``` - /// - /// [`Sender`]: struct.Sender.html - /// [`Select::send`]: struct.Select.html#method.send - pub fn send(mut self, s: &Sender, msg: T) -> Result<(), SendError> { - assert!( - s as *const Sender as *const u8 == self.ptr, - "passed a sender that wasn't selected", - ); - let res = unsafe { channel::write(s, &mut self.token, msg) }; - mem::forget(self); - res.map_err(SendError) - } - - /// Completes the receive operation. - /// - /// The passed [`Receiver`] reference must be the same one that was used in [`Select::recv`] - /// when the operation was added. - /// - /// # Panics - /// - /// Panics if an incorrect [`Receiver`] reference is passed. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, Select, RecvError}; - /// - /// let (s, r) = bounded::(0); - /// drop(s); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r); - /// - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper1); - /// assert_eq!(oper.recv(&r), Err(RecvError)); - /// ``` - /// - /// [`Receiver`]: struct.Receiver.html - /// [`Select::recv`]: struct.Select.html#method.recv - pub fn recv(mut self, r: &Receiver) -> Result { - assert!( - r as *const Receiver as *const u8 == self.ptr, - "passed a receiver that wasn't selected", - ); - let res = unsafe { channel::read(r, &mut self.token) }; - mem::forget(self); - res.map_err(|_| RecvError) - } -} - -impl<'a> fmt::Debug for SelectedOperation<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("SelectedOperation { .. }") - } -} - -impl<'a> Drop for SelectedOperation<'a> { - fn drop(&mut self) { - panic!("dropped `SelectedOperation` without completing the operation"); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/utils.rs cargo-0.47.0/vendor/crossbeam-channel/src/utils.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/utils.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/utils.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,112 +0,0 @@ -//! Miscellaneous utilities. 
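The `Select` API whose removal ends here is driven entirely through its public surface (`new`, `send`, `recv`, `remove`, `select`, and the ready variants). As a minimal illustration, not part of the patch itself, the sketch below drains a dynamic set of receivers and calls `Select::remove` once a channel disconnects; the function name `drain_all` is invented for the example.

```
use crossbeam_channel::{Receiver, Select};

// Receive every message from a dynamic set of channels, dropping each
// operation from the `Select` once its channel disconnects.
fn drain_all(rs: &[Receiver<i32>]) -> Vec<i32> {
    let mut sel = Select::new();
    for r in rs {
        // Operations are added in order, so operation index i maps to rs[i].
        sel.recv(r);
    }

    let mut remaining = rs.len();
    let mut out = Vec::new();
    while remaining > 0 {
        // `select()` blocks until some operation is ready; the returned
        // `SelectedOperation` must be completed or it panics on drop.
        let oper = sel.select();
        let i = oper.index();
        match oper.recv(&rs[i]) {
            Ok(v) => out.push(v),
            Err(_) => {
                // Disconnected: remove it so it is not selected again.
                sel.remove(i);
                remaining -= 1;
            }
        }
    }
    out
}
```

Completing the operation through `SelectedOperation::recv`, rather than calling `Receiver::recv` directly, is what keeps the drop guard described above from panicking.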
- -use std::cell::{Cell, UnsafeCell}; -use std::num::Wrapping; -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_utils::Backoff; - -/// Randomly shuffles a slice. -pub fn shuffle(v: &mut [T]) { - let len = v.len(); - if len <= 1 { - return; - } - - thread_local! { - static RNG: Cell> = Cell::new(Wrapping(1406868647)); - } - - let _ = RNG.try_with(|rng| { - for i in 1..len { - // This is the 32-bit variant of Xorshift. - // - // Source: https://en.wikipedia.org/wiki/Xorshift - let mut x = rng.get(); - x ^= x << 13; - x ^= x >> 17; - x ^= x << 5; - rng.set(x); - - let x = x.0; - let n = i + 1; - - // This is a fast alternative to `let j = x % n`. - // - // Author: Daniel Lemire - // Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize; - - v.swap(i, j); - } - }); -} - -/// Sleeps until the deadline, or forever if the deadline isn't specified. -pub fn sleep_until(deadline: Option) { - loop { - match deadline { - None => thread::sleep(Duration::from_secs(1000)), - Some(d) => { - let now = Instant::now(); - if now >= d { - break; - } - thread::sleep(d - now); - } - } - } -} - -/// A simple spinlock. -pub struct Spinlock { - flag: AtomicBool, - value: UnsafeCell, -} - -impl Spinlock { - /// Returns a new spinlock initialized with `value`. - pub fn new(value: T) -> Spinlock { - Spinlock { - flag: AtomicBool::new(false), - value: UnsafeCell::new(value), - } - } - - /// Locks the spinlock. - pub fn lock(&self) -> SpinlockGuard<'_, T> { - let backoff = Backoff::new(); - while self.flag.swap(true, Ordering::Acquire) { - backoff.snooze(); - } - SpinlockGuard { parent: self } - } -} - -/// A guard holding a spinlock locked. -pub struct SpinlockGuard<'a, T: 'a> { - parent: &'a Spinlock, -} - -impl<'a, T> Drop for SpinlockGuard<'a, T> { - fn drop(&mut self) { - self.parent.flag.store(false, Ordering::Release); - } -} - -impl<'a, T> Deref for SpinlockGuard<'a, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.parent.value.get() } - } -} - -impl<'a, T> DerefMut for SpinlockGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.parent.value.get() } - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/src/waker.rs cargo-0.47.0/vendor/crossbeam-channel/src/waker.rs --- cargo-0.44.1/vendor/crossbeam-channel/src/waker.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/src/waker.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,285 +0,0 @@ -//! Waking mechanism for threads blocked on channel operations. - -use std::sync::atomic::{AtomicBool, Ordering}; -use std::thread::{self, ThreadId}; - -use context::Context; -use select::{Operation, Selected}; -use utils::Spinlock; - -/// Represents a thread blocked on a specific channel operation. -pub struct Entry { - /// The operation. - pub oper: Operation, - - /// Optional packet. - pub packet: usize, - - /// Context associated with the thread owning this operation. - pub cx: Context, -} - -/// A queue of threads blocked on channel operations. -/// -/// This data structure is used by threads to register blocking operations and get woken up once -/// an operation becomes ready. -pub struct Waker { - /// A list of select operations. - selectors: Vec, - - /// A list of operations waiting to be ready. - observers: Vec, -} - -impl Waker { - /// Creates a new `Waker`. 
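The `shuffle` helper removed above is generic over the element type (`shuffle<T>(v: &mut [T])`, with a thread-local `Cell<Wrapping<u32>>` as its RNG state; the angle-bracketed parameters were lost in extraction). It combines a 32-bit Xorshift step with Lemire's multiply-shift reduction, as its comments describe. A standalone sketch of the same technique, using an arbitrary fixed seed in place of the crate's thread-local state:

```
use std::num::Wrapping;

// Fisher-Yates-style shuffle: a 32-bit Xorshift generator plus Lemire's
// multiply-shift reduction to map the random word into 0..=i without a
// modulo operation.
fn shuffle<T>(v: &mut [T]) {
    let len = v.len();
    if len <= 1 {
        return;
    }
    // Arbitrary non-zero seed (the real code keeps this in a thread-local).
    let mut x = Wrapping(0x5397_3f6du32);
    for i in 1..len {
        // Xorshift step.
        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        // Lemire reduction: ((x * n) >> 32) lies in 0..n for n = i + 1.
        let j = ((x.0 as u64).wrapping_mul((i + 1) as u64) >> 32) as usize;
        v.swap(i, j);
    }
}
```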
- #[inline] - pub fn new() -> Self { - Waker { - selectors: Vec::new(), - observers: Vec::new(), - } - } - - /// Registers a select operation. - #[inline] - pub fn register(&mut self, oper: Operation, cx: &Context) { - self.register_with_packet(oper, 0, cx); - } - - /// Registers a select operation and a packet. - #[inline] - pub fn register_with_packet(&mut self, oper: Operation, packet: usize, cx: &Context) { - self.selectors.push(Entry { - oper, - packet, - cx: cx.clone(), - }); - } - - /// Unregisters a select operation. - #[inline] - pub fn unregister(&mut self, oper: Operation) -> Option { - if let Some((i, _)) = self - .selectors - .iter() - .enumerate() - .find(|&(_, entry)| entry.oper == oper) - { - let entry = self.selectors.remove(i); - Some(entry) - } else { - None - } - } - - /// Attempts to find another thread's entry, select the operation, and wake it up. - #[inline] - pub fn try_select(&mut self) -> Option { - let mut entry = None; - - if !self.selectors.is_empty() { - let thread_id = current_thread_id(); - - for i in 0..self.selectors.len() { - // Does the entry belong to a different thread? - if self.selectors[i].cx.thread_id() != thread_id { - // Try selecting this operation. - let sel = Selected::Operation(self.selectors[i].oper); - let res = self.selectors[i].cx.try_select(sel); - - if res.is_ok() { - // Provide the packet. - self.selectors[i].cx.store_packet(self.selectors[i].packet); - // Wake the thread up. - self.selectors[i].cx.unpark(); - - // Remove the entry from the queue to keep it clean and improve - // performance. - entry = Some(self.selectors.remove(i)); - break; - } - } - } - } - - entry - } - - /// Returns `true` if there is an entry which can be selected by the current thread. - #[inline] - pub fn can_select(&self) -> bool { - if self.selectors.is_empty() { - false - } else { - let thread_id = current_thread_id(); - - self.selectors.iter().any(|entry| { - entry.cx.thread_id() != thread_id && entry.cx.selected() == Selected::Waiting - }) - } - } - - /// Registers an operation waiting to be ready. - #[inline] - pub fn watch(&mut self, oper: Operation, cx: &Context) { - self.observers.push(Entry { - oper, - packet: 0, - cx: cx.clone(), - }); - } - - /// Unregisters an operation waiting to be ready. - #[inline] - pub fn unwatch(&mut self, oper: Operation) { - self.observers.retain(|e| e.oper != oper); - } - - /// Notifies all operations waiting to be ready. - #[inline] - pub fn notify(&mut self) { - for entry in self.observers.drain(..) { - if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() { - entry.cx.unpark(); - } - } - } - - /// Notifies all registered operations that the channel is disconnected. - #[inline] - pub fn disconnect(&mut self) { - for entry in self.selectors.iter() { - if entry.cx.try_select(Selected::Disconnected).is_ok() { - // Wake the thread up. - // - // Here we don't remove the entry from the queue. Registered threads must - // unregister from the waker by themselves. They might also want to recover the - // packet value and destroy it, if necessary. - entry.cx.unpark(); - } - } - - self.notify(); - } -} - -impl Drop for Waker { - #[inline] - fn drop(&mut self) { - debug_assert_eq!(self.selectors.len(), 0); - debug_assert_eq!(self.observers.len(), 0); - } -} - -/// A waker that can be shared among threads without locking. -/// -/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization. -pub struct SyncWaker { - /// The inner `Waker`. 
- inner: Spinlock, - - /// `true` if the waker is empty. - is_empty: AtomicBool, -} - -impl SyncWaker { - /// Creates a new `SyncWaker`. - #[inline] - pub fn new() -> Self { - SyncWaker { - inner: Spinlock::new(Waker::new()), - is_empty: AtomicBool::new(true), - } - } - - /// Registers the current thread with an operation. - #[inline] - pub fn register(&self, oper: Operation, cx: &Context) { - let mut inner = self.inner.lock(); - inner.register(oper, cx); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - - /// Unregisters an operation previously registered by the current thread. - #[inline] - pub fn unregister(&self, oper: Operation) -> Option { - let mut inner = self.inner.lock(); - let entry = inner.unregister(oper); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - entry - } - - /// Attempts to find one thread (not the current one), select its operation, and wake it up. - #[inline] - pub fn notify(&self) { - if !self.is_empty.load(Ordering::SeqCst) { - let mut inner = self.inner.lock(); - inner.try_select(); - inner.notify(); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - } - - /// Registers an operation waiting to be ready. - #[inline] - pub fn watch(&self, oper: Operation, cx: &Context) { - let mut inner = self.inner.lock(); - inner.watch(oper, cx); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - - /// Unregisters an operation waiting to be ready. - #[inline] - pub fn unwatch(&self, oper: Operation) { - let mut inner = self.inner.lock(); - inner.unwatch(oper); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - - /// Notifies all threads that the channel is disconnected. - #[inline] - pub fn disconnect(&self) { - let mut inner = self.inner.lock(); - inner.disconnect(); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } -} - -impl Drop for SyncWaker { - #[inline] - fn drop(&mut self) { - debug_assert_eq!(self.is_empty.load(Ordering::SeqCst), true); - } -} - -/// Returns the id of the current thread. -#[inline] -fn current_thread_id() -> ThreadId { - thread_local! { - /// Cached thread-local id. - static THREAD_ID: ThreadId = thread::current().id(); - } - - THREAD_ID - .try_with(|id| *id) - .unwrap_or_else(|_| thread::current().id()) -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/after.rs cargo-0.47.0/vendor/crossbeam-channel/tests/after.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/after.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/after.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,339 +0,0 @@ -//! Tests for the after channel flavor. 
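The `after` tests that follow exercise a one-shot timer channel. As a hedged sketch (not taken from the patch), `after` pairs naturally with `select!` as a timeout arm; the 100 ms value and the `recv_with_timeout` name are illustrative only.

```
use std::time::Duration;
use crossbeam_channel::{after, select, Receiver};

// Receive from `r`, giving up after 100 ms. `after` yields a single
// `Instant` once the duration elapses, so it can serve as a timeout arm.
fn recv_with_timeout(r: &Receiver<i32>) -> Option<i32> {
    select! {
        recv(r) -> msg => msg.ok(),
        recv(after(Duration::from_millis(100))) -> _ => None,
    }
}
```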
- -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; -extern crate rand; - -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, Select, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn fire() { - let start = Instant::now(); - let r = after(ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(100)); - - let fired = r.try_recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired >= ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - - select! { - recv(r) -> _ => panic!(), - default => {} - } - - select! { - recv(r) -> _ => panic!(), - recv(after(ms(200))) -> _ => {} - } -} - -#[test] -fn capacity() { - const COUNT: usize = 10; - - for i in 0..COUNT { - let r = after(ms(i as u64)); - assert_eq!(r.capacity(), Some(1)); - } -} - -#[test] -fn len_empty_full() { - let r = after(ms(50)); - - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); - - thread::sleep(ms(100)); - - assert_eq!(r.len(), 1); - assert_eq!(r.is_empty(), false); - assert_eq!(r.is_full(), true); - - r.try_recv().unwrap(); - - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); -} - -#[test] -fn try_recv() { - let r = after(ms(200)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(100)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_ok()); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_err()); -} - -#[test] -fn recv() { - let start = Instant::now(); - let r = after(ms(50)); - - let fired = r.recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired < fired - start); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn recv_timeout() { - let start = Instant::now(); - let r = after(ms(200)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(100)); - assert!(now - start <= ms(150)); - - let fired = r.recv_timeout(ms(200)).unwrap(); - assert!(fired - start >= ms(200)); - assert!(fired - start <= ms(250)); - - assert!(r.recv_timeout(ms(200)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(400)); - assert!(now - start <= ms(450)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn recv_two() { - let r1 = after(ms(50)); - let r2 = after(ms(50)); - - scope(|scope| { - scope.spawn(|_| { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - }); - scope.spawn(|_| { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - }); - }) - .unwrap(); -} - -#[test] -fn recv_race() { - select! { - recv(after(ms(50))) -> _ => {} - recv(after(ms(100))) -> _ => panic!(), - } - - select! { - recv(after(ms(100))) -> _ => panic!(), - recv(after(ms(50))) -> _ => {} - } -} - -#[test] -fn stress_default() { - const COUNT: usize = 10; - - for _ in 0..COUNT { - select! { - recv(after(ms(0))) -> _ => {} - default => panic!(), - } - } - - for _ in 0..COUNT { - select! 
{ - recv(after(ms(100))) -> _ => panic!(), - default => {} - } - } -} - -#[test] -fn select() { - const THREADS: usize = 4; - const COUNT: usize = 1000; - const TIMEOUT_MS: u64 = 100; - - let v = (0..COUNT) - .map(|i| after(ms(i as u64 / TIMEOUT_MS / 2))) - .collect::>(); - let hits = AtomicUsize::new(0); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let v: Vec<&_> = v.iter().collect(); - - loop { - let timeout = after(ms(TIMEOUT_MS)); - let mut sel = Select::new(); - for r in &v { - sel.recv(r); - } - let oper_timeout = sel.recv(&timeout); - - let oper = sel.select(); - match oper.index() { - i if i == oper_timeout => { - oper.recv(&timeout).unwrap(); - break; - } - i => { - oper.recv(&v[i]).unwrap(); - hits.fetch_add(1, Ordering::SeqCst); - } - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), COUNT); -} - -#[test] -fn ready() { - const THREADS: usize = 4; - const COUNT: usize = 1000; - const TIMEOUT_MS: u64 = 100; - - let v = (0..COUNT) - .map(|i| after(ms(i as u64 / TIMEOUT_MS / 2))) - .collect::>(); - let hits = AtomicUsize::new(0); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let v: Vec<&_> = v.iter().collect(); - - loop { - let timeout = after(ms(TIMEOUT_MS)); - let mut sel = Select::new(); - for r in &v { - sel.recv(r); - } - let oper_timeout = sel.recv(&timeout); - - loop { - let i = sel.ready(); - if i == oper_timeout { - timeout.try_recv().unwrap(); - return; - } else if v[i].try_recv().is_ok() { - hits.fetch_add(1, Ordering::SeqCst); - break; - } - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), COUNT); -} - -#[test] -fn stress_clone() { - const RUNS: usize = 1000; - const THREADS: usize = 10; - const COUNT: usize = 50; - - for i in 0..RUNS { - let r = after(ms(i as u64)); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let r = r.clone(); - let _ = r.try_recv(); - - for _ in 0..COUNT { - drop(r.clone()); - thread::yield_now(); - } - }); - } - }) - .unwrap(); - } -} - -#[test] -fn fairness() { - const COUNT: usize = 1000; - - for &dur in &[0, 1] { - let mut hits = [0usize; 2]; - - for _ in 0..COUNT { - select! { - recv(after(ms(dur))) -> _ => hits[0] += 1, - recv(after(ms(dur))) -> _ => hits[1] += 1, - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 1000; - - for &dur in &[0, 1] { - let mut hits = [0usize; 5]; - - for _ in 0..COUNT { - let r = after(ms(dur)); - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/array.rs cargo-0.47.0/vendor/crossbeam-channel/tests/array.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/array.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/array.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,659 +0,0 @@ -//! Tests for the array channel flavor. 
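The array-flavor tests that follow revolve around bounded capacity. A minimal, self-contained sketch of the backpressure behaviour they assert (illustrative, with arbitrary values):

```
use crossbeam_channel::{bounded, TrySendError};

fn main() {
    // Capacity 1: the second `try_send` reports `Full` instead of blocking.
    let (s, r) = bounded(1);
    s.try_send(1).unwrap();
    assert_eq!(s.try_send(2), Err(TrySendError::Full(2)));

    // Receiving frees the slot, so the next send succeeds.
    assert_eq!(r.recv(), Ok(1));
    s.try_send(2).unwrap();
    assert_eq!(r.recv(), Ok(2));
}
```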
- -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; -extern crate rand; - -use std::any::Any; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{bounded, Receiver}; -use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; -use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - let (s, r) = bounded(1); - s.send(7).unwrap(); - assert_eq!(r.try_recv(), Ok(7)); - - s.send(8).unwrap(); - assert_eq!(r.recv(), Ok(8)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); -} - -#[test] -fn capacity() { - for i in 1..10 { - let (s, r) = bounded::<()>(i); - assert_eq!(s.capacity(), Some(i)); - assert_eq!(r.capacity(), Some(i)); - } -} - -#[test] -fn len_empty_full() { - let (s, r) = bounded(2); - - assert_eq!(s.len(), 0); - assert_eq!(s.is_empty(), true); - assert_eq!(s.is_full(), false); - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); - - s.send(()).unwrap(); - - assert_eq!(s.len(), 1); - assert_eq!(s.is_empty(), false); - assert_eq!(s.is_full(), false); - assert_eq!(r.len(), 1); - assert_eq!(r.is_empty(), false); - assert_eq!(r.is_full(), false); - - s.send(()).unwrap(); - - assert_eq!(s.len(), 2); - assert_eq!(s.is_empty(), false); - assert_eq!(s.is_full(), true); - assert_eq!(r.len(), 2); - assert_eq!(r.is_empty(), false); - assert_eq!(r.is_full(), true); - - r.recv().unwrap(); - - assert_eq!(s.len(), 1); - assert_eq!(s.is_empty(), false); - assert_eq!(s.is_full(), false); - assert_eq!(r.len(), 1); - assert_eq!(r.is_empty(), false); - assert_eq!(r.is_full(), false); -} - -#[test] -fn try_recv() { - let (s, r) = bounded(100); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(1500)); - assert_eq!(r.try_recv(), Ok(7)); - thread::sleep(ms(500)); - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = bounded(100); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Ok(7)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(9)); - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - s.send(8).unwrap(); - s.send(9).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = bounded::(100); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); - assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); - assert_eq!( - r.recv_timeout(ms(1000)), - Err(RecvTimeoutError::Disconnected) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.try_send(1), Ok(())); - assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); - thread::sleep(ms(1500)); - assert_eq!(s.try_send(3), Ok(())); - thread::sleep(ms(500)); - assert_eq!(s.try_send(4), Err(TrySendError::Disconnected(4))); - }); - scope.spawn(move |_| 
{ - thread::sleep(ms(1000)); - assert_eq!(r.try_recv(), Ok(1)); - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(r.recv(), Ok(3)); - }); - }) - .unwrap(); -} - -#[test] -fn send() { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(|_| { - s.send(7).unwrap(); - thread::sleep(ms(1000)); - s.send(8).unwrap(); - thread::sleep(ms(1000)); - s.send(9).unwrap(); - thread::sleep(ms(1000)); - s.send(10).unwrap(); - }); - scope.spawn(|_| { - thread::sleep(ms(1500)); - assert_eq!(r.recv(), Ok(7)); - assert_eq!(r.recv(), Ok(8)); - assert_eq!(r.recv(), Ok(9)); - }); - }) - .unwrap(); -} - -#[test] -fn send_timeout() { - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.send_timeout(1, ms(1000)), Ok(())); - assert_eq!(s.send_timeout(2, ms(1000)), Ok(())); - assert_eq!( - s.send_timeout(3, ms(500)), - Err(SendTimeoutError::Timeout(3)) - ); - thread::sleep(ms(1000)); - assert_eq!(s.send_timeout(4, ms(1000)), Ok(())); - thread::sleep(ms(1000)); - assert_eq!(s.send(5), Err(SendError(5))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(1)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(2)); - assert_eq!(r.recv(), Ok(4)); - }); - }) - .unwrap(); -} - -#[test] -fn send_after_disconnect() { - let (s, r) = bounded(100); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(r); - - assert_eq!(s.send(4), Err(SendError(4))); - assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5))); - assert_eq!( - s.send_timeout(6, ms(500)), - Err(SendTimeoutError::Disconnected(6)) - ); -} - -#[test] -fn recv_after_disconnect() { - let (s, r) = bounded(100); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(s); - - assert_eq!(r.recv(), Ok(1)); - assert_eq!(r.recv(), Ok(2)); - assert_eq!(r.recv(), Ok(3)); - assert_eq!(r.recv(), Err(RecvError)); -} - -#[test] -fn len() { - const COUNT: usize = 25_000; - const CAP: usize = 1000; - - let (s, r) = bounded(CAP); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - for _ in 0..CAP / 10 { - for i in 0..50 { - s.send(i).unwrap(); - assert_eq!(s.len(), i + 1); - } - - for i in 0..50 { - r.recv().unwrap(); - assert_eq!(r.len(), 50 - i - 1); - } - } - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - for i in 0..CAP { - s.send(i).unwrap(); - assert_eq!(s.len(), i + 1); - } - - for _ in 0..CAP { - r.recv().unwrap(); - } - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - let len = r.len(); - assert!(len <= CAP); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - let len = s.len(); - assert!(len <= CAP); - } - }); - }) - .unwrap(); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); -} - -#[test] -fn disconnect_wakes_sender() { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.send(()), Ok(())); - assert_eq!(s.send(()), Err(SendError(()))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(r); - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = bounded::<()>(1); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn spsc() { - const COUNT: usize = 100_000; - - let (s, r) = bounded(3); - - scope(|scope| { - scope.spawn(move |_| { - for i in 0..COUNT { - assert_eq!(r.recv(), 
Ok(i)); - } - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = bounded::(3); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = r.recv().unwrap(); - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn stress_oneshot() { - const COUNT: usize = 10_000; - - for _ in 0..COUNT { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(|_| r.recv().unwrap()); - scope.spawn(|_| s.send(0).unwrap()); - }) - .unwrap(); - } -} - -#[test] -fn stress_iter() { - const COUNT: usize = 100_000; - - let (request_s, request_r) = bounded(1); - let (response_s, response_r) = bounded(1); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == COUNT { - return; - } - } - request_s.send(()).unwrap(); - } - }); - - for _ in request_r.iter() { - if response_s.send(1).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 100; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(()) = s.send_timeout(i, ms(10)) { - break; - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(x) = r.recv_timeout(ms(10)) { - assert_eq!(x, i); - break; - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn drops() { - const RUNS: usize = 100; - - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..RUNS { - let steps = rng.gen_range(0, 10_000); - let additional = rng.gen_range(0, 50); - - DROPS.store(0, Ordering::SeqCst); - let (s, r) = bounded::(50); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - r.recv().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - s.send(DropCounter).unwrap(); - } - }); - }) - .unwrap(); - - for _ in 0..additional { - s.send(DropCounter).unwrap(); - } - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(s); - drop(r); - assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); - } -} - -#[test] -fn linearizable() { - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = bounded(THREADS); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - s.send(0).unwrap(); - r.try_recv().unwrap(); - } - }); - } - }) - .unwrap(); -} - -#[test] -fn fairness() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = bounded::<()>(COUNT); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! 
{ - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 10_000; - - let (s, r) = bounded::<()>(COUNT); - - for _ in 0..COUNT { - s.send(()).unwrap(); - } - - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn recv_in_send() { - let (s, _r) = bounded(1); - s.send(()).unwrap(); - - #[allow(unreachable_code)] - { - select! { - send(s, panic!()) -> _ => panic!(), - default => {} - } - } - - let (s, r) = bounded(2); - s.send(()).unwrap(); - - select! { - send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {} - } -} - -#[test] -fn channel_through_channel() { - const COUNT: usize = 1000; - - type T = Box; - - let (s, r) = bounded::(1); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(1); - let new_r: T = Box::new(Some(new_r)); - - s.send(new_r).unwrap(); - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = r - .recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - }); - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/golang.rs cargo-0.47.0/vendor/crossbeam-channel/tests/golang.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/golang.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/golang.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1448 +0,0 @@ -//! Tests copied from Go and manually rewritten in Rust. -//! -//! Source: -//! - https://github.com/golang/go -//! -//! Copyright & License: -//! - Copyright (c) 2009 The Go Authors -//! - https://golang.org/AUTHORS -//! - https://golang.org/LICENSE -//! 
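The Go-derived tests below build their `Chan` wrapper on `bounded(0)`. A short sketch of why that works (not from the patch): a zero-capacity crossbeam channel is a rendezvous channel, the closest analogue of an unbuffered Go channel.

```
use std::thread;
use crossbeam_channel::bounded;

fn main() {
    // Zero capacity: every send blocks until a receiver arrives.
    let (s, r) = bounded::<i32>(0);

    let t = thread::spawn(move || {
        s.send(7).unwrap(); // parks until the `recv` below runs
    });

    assert_eq!(r.recv(), Ok(7));
    t.join().unwrap();
}
```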
- https://golang.org/PATENTS - -#[macro_use] -extern crate crossbeam_channel; - -use std::any::Any; -use std::cell::Cell; -use std::collections::HashMap; -use std::sync::{Arc, Condvar, Mutex}; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{bounded, tick, Receiver, Select, Sender}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -struct Chan { - inner: Arc>>, -} - -struct ChanInner { - s: Option>, - r: Receiver, -} - -impl Clone for Chan { - fn clone(&self) -> Chan { - Chan { - inner: self.inner.clone(), - } - } -} - -impl Chan { - fn send(&self, msg: T) { - let s = self - .inner - .lock() - .unwrap() - .s - .as_ref() - .expect("sending into closed channel") - .clone(); - let _ = s.send(msg); - } - - fn try_recv(&self) -> Option { - let r = self.inner.lock().unwrap().r.clone(); - r.try_recv().ok() - } - - fn recv(&self) -> Option { - let r = self.inner.lock().unwrap().r.clone(); - r.recv().ok() - } - - fn close(&self) { - self.inner - .lock() - .unwrap() - .s - .take() - .expect("channel already closed"); - } - - fn rx(&self) -> Receiver { - self.inner.lock().unwrap().r.clone() - } - - fn tx(&self) -> Sender { - match self.inner.lock().unwrap().s.as_ref() { - None => { - let (s, r) = bounded(0); - std::mem::forget(r); - s - } - Some(s) => s.clone(), - } - } -} - -impl Iterator for Chan { - type Item = T; - - fn next(&mut self) -> Option { - self.recv() - } -} - -impl<'a, T> IntoIterator for &'a Chan { - type Item = T; - type IntoIter = Chan; - - fn into_iter(self) -> Self::IntoIter { - self.clone() - } -} - -fn make(cap: usize) -> Chan { - let (s, r) = bounded(cap); - Chan { - inner: Arc::new(Mutex::new(ChanInner { s: Some(s), r })), - } -} - -#[derive(Clone)] -struct WaitGroup(Arc); - -struct WaitGroupInner { - cond: Condvar, - count: Mutex, -} - -impl WaitGroup { - fn new() -> WaitGroup { - WaitGroup(Arc::new(WaitGroupInner { - cond: Condvar::new(), - count: Mutex::new(0), - })) - } - - fn add(&self, delta: i32) { - let mut count = self.0.count.lock().unwrap(); - *count += delta; - assert!(*count >= 0); - self.0.cond.notify_all(); - } - - fn done(&self) { - self.add(-1); - } - - fn wait(&self) { - let mut count = self.0.count.lock().unwrap(); - while *count > 0 { - count = self.0.cond.wait(count).unwrap(); - } - } -} - -struct Defer { - f: Option>, -} - -impl Drop for Defer { - fn drop(&mut self) { - let f = self.f.take().unwrap(); - let mut f = Some(f); - let mut f = move || f.take().unwrap()(); - f(); - } -} - -macro_rules! defer { - ($body:expr) => { - let _defer = Defer { - f: Some(Box::new(|| $body)), - }; - }; -} - -macro_rules! go { - (@parse ref $v:ident, $($tail:tt)*) => {{ - let ref $v = $v; - go!(@parse $($tail)*) - }}; - (@parse move $v:ident, $($tail:tt)*) => {{ - let $v = $v; - go!(@parse $($tail)*) - }}; - (@parse $v:ident, $($tail:tt)*) => {{ - let $v = $v.clone(); - go!(@parse $($tail)*) - }}; - (@parse $body:expr) => { - ::std::thread::spawn(move || { - let res = ::std::panic::catch_unwind(::std::panic::AssertUnwindSafe(|| { - $body - })); - if res.is_err() { - eprintln!("goroutine panicked: {:?}", res); - ::std::process::abort(); - } - }) - }; - (@parse $($tail:tt)*) => { - compile_error!("invalid `go!` syntax") - }; - ($($tail:tt)*) => {{ - go!(@parse $($tail)*) - }}; -} - -// https://github.com/golang/go/blob/master/test/chan/doubleselect.go -mod doubleselect { - use super::*; - - const ITERATIONS: i32 = 10_000; - - fn sender(n: i32, c1: Chan, c2: Chan, c3: Chan, c4: Chan) { - defer! { c1.close() } - defer! 
{ c2.close() } - defer! { c3.close() } - defer! { c4.close() } - - for i in 0..n { - select! { - send(c1.tx(), i) -> _ => {} - send(c2.tx(), i) -> _ => {} - send(c3.tx(), i) -> _ => {} - send(c4.tx(), i) -> _ => {} - } - } - } - - fn mux(out: Chan, inp: Chan, done: Chan) { - for v in inp { - out.send(v); - } - done.send(true); - } - - fn recver(inp: Chan) { - let mut seen = HashMap::new(); - - for v in &inp { - if seen.contains_key(&v) { - panic!("got duplicate value for {}", v); - } - seen.insert(v, true); - } - } - - #[test] - fn main() { - let c1 = make::(0); - let c2 = make::(0); - let c3 = make::(0); - let c4 = make::(0); - let done = make::(0); - let cmux = make::(0); - - go!(c1, c2, c3, c4, sender(ITERATIONS, c1, c2, c3, c4)); - go!(cmux, c1, done, mux(cmux, c1, done)); - go!(cmux, c2, done, mux(cmux, c2, done)); - go!(cmux, c3, done, mux(cmux, c3, done)); - go!(cmux, c4, done, mux(cmux, c4, done)); - go!(done, cmux, { - done.recv(); - done.recv(); - done.recv(); - done.recv(); - cmux.close(); - }); - recver(cmux); - } -} - -// https://github.com/golang/go/blob/master/test/chan/fifo.go -mod fifo { - use super::*; - - const N: i32 = 10; - - #[test] - fn asynch_fifo() { - let ch = make::(N as usize); - for i in 0..N { - ch.send(i); - } - for i in 0..N { - if ch.recv() != Some(i) { - panic!("bad receive"); - } - } - } - - fn chain(ch: Chan, val: i32, inp: Chan, out: Chan) { - inp.recv(); - if ch.recv() != Some(val) { - panic!(val); - } - out.send(1); - } - - #[test] - fn synch_fifo() { - let ch = make::(0); - let mut inp = make::(0); - let start = inp.clone(); - - for i in 0..N { - let out = make::(0); - go!(ch, i, inp, out, chain(ch, i, inp, out)); - inp = out; - } - - start.send(0); - for i in 0..N { - ch.send(i); - } - inp.recv(); - } -} - -// https://github.com/golang/go/blob/master/test/chan/goroutines.go -mod goroutines { - use super::*; - - fn f(left: Chan, right: Chan) { - left.send(right.recv().unwrap()); - } - - #[test] - fn main() { - let n = 100i32; - - let leftmost = make::(0); - let mut right = leftmost.clone(); - let mut left = leftmost.clone(); - - for _ in 0..n { - right = make::(0); - go!(left, right, f(left, right)); - left = right.clone(); - } - - go!(right, right.send(1)); - leftmost.recv().unwrap(); - } -} - -// https://github.com/golang/go/blob/master/test/chan/nonblock.go -mod nonblock { - use super::*; - - fn i32receiver(c: Chan, strobe: Chan) { - if c.recv().unwrap() != 123 { - panic!("i32 value"); - } - strobe.send(true); - } - - fn i32sender(c: Chan, strobe: Chan) { - c.send(234); - strobe.send(true); - } - - fn i64receiver(c: Chan, strobe: Chan) { - if c.recv().unwrap() != 123456 { - panic!("i64 value"); - } - strobe.send(true); - } - - fn i64sender(c: Chan, strobe: Chan) { - c.send(234567); - strobe.send(true); - } - - fn breceiver(c: Chan, strobe: Chan) { - if !c.recv().unwrap() { - panic!("b value"); - } - strobe.send(true); - } - - fn bsender(c: Chan, strobe: Chan) { - c.send(true); - strobe.send(true); - } - - fn sreceiver(c: Chan, strobe: Chan) { - if c.recv().unwrap() != "hello" { - panic!("x value"); - } - strobe.send(true); - } - - fn ssender(c: Chan, strobe: Chan) { - c.send("hello again".to_string()); - strobe.send(true); - } - - const MAX_TRIES: usize = 10000; // Up to 100ms per test. 
- - #[test] - fn main() { - let ticker = tick(Duration::new(0, 10_000)); // 10 us - let sleep = || { - ticker.recv().unwrap(); - ticker.recv().unwrap(); - thread::yield_now(); - thread::yield_now(); - thread::yield_now(); - }; - - let sync = make::(0); - - for buffer in 0..2 { - let c32 = make::(buffer); - let c64 = make::(buffer); - let cb = make::(buffer); - let cs = make::(buffer); - - select! { - recv(c32.rx()) -> _ => panic!("blocked i32sender"), - default => {} - } - - select! { - recv(c64.rx()) -> _ => panic!("blocked i64sender"), - default => {} - } - - select! { - recv(cb.rx()) -> _ => panic!("blocked bsender"), - default => {} - } - - select! { - recv(cs.rx()) -> _ => panic!("blocked ssender"), - default => {} - } - - go!(c32, sync, i32receiver(c32, sync)); - let mut try = 0; - loop { - select! { - send(c32.tx(), 123) -> _ => break, - default => { - try += 1; - if try > MAX_TRIES { - println!("i32receiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(c32, sync, i32sender(c32, sync)); - if buffer > 0 { - sync.recv(); - } - let mut try = 0; - loop { - select! { - recv(c32.rx()) -> v => { - if v != Ok(234) { - panic!("i32sender value"); - } - break; - } - default => { - try += 1; - if try > MAX_TRIES { - println!("i32sender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - - go!(c64, sync, i64receiver(c64, sync)); - let mut try = 0; - loop { - select! { - send(c64.tx(), 123456) -> _ => break, - default => { - try += 1; - if try > MAX_TRIES { - println!("i64receiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(c64, sync, i64sender(c64, sync)); - if buffer > 0 { - sync.recv(); - } - let mut try = 0; - loop { - select! { - recv(c64.rx()) -> v => { - if v != Ok(234567) { - panic!("i64sender value"); - } - break; - } - default => { - try += 1; - if try > MAX_TRIES { - println!("i64sender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - - go!(cb, sync, breceiver(cb, sync)); - let mut try = 0; - loop { - select! { - send(cb.tx(), true) -> _ => break, - default => { - try += 1; - if try > MAX_TRIES { - println!("breceiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(cb, sync, bsender(cb, sync)); - if buffer > 0 { - sync.recv(); - } - let mut try = 0; - loop { - select! { - recv(cb.rx()) -> v => { - if v != Ok(true) { - panic!("bsender value"); - } - break; - } - default => { - try += 1; - if try > MAX_TRIES { - println!("bsender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - - go!(cs, sync, sreceiver(cs, sync)); - let mut try = 0; - loop { - select! { - send(cs.tx(), "hello".to_string()) -> _ => break, - default => { - try += 1; - if try > MAX_TRIES { - println!("sreceiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(cs, sync, ssender(cs, sync)); - if buffer > 0 { - sync.recv(); - } - let mut try = 0; - loop { - select! 
{ - recv(cs.rx()) -> v => { - if v != Ok("hello again".to_string()) { - panic!("ssender value"); - } - break; - } - default => { - try += 1; - if try > MAX_TRIES { - println!("ssender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - } - } -} - -// https://github.com/golang/go/blob/master/test/chan/select.go -mod select { - use super::*; - - #[test] - fn main() { - let shift = Cell::new(0); - let counter = Cell::new(0); - - let get_value = || { - counter.set(counter.get() + 1); - 1 << shift.get() - }; - - let send = |mut a: Option<&Chan>, mut b: Option<&Chan>| { - let mut i = 0; - let never = make::(0); - loop { - let nil1 = never.tx(); - let nil2 = never.tx(); - let v1 = get_value(); - let v2 = get_value(); - select! { - send(a.map(|c| c.tx()).unwrap_or(nil1), v1) -> _ => { - i += 1; - a = None; - } - send(b.map(|c| c.tx()).unwrap_or(nil2), v2) -> _ => { - i += 1; - b = None; - } - default => break, - } - shift.set(shift.get() + 1); - } - i - }; - - let a = make::(1); - let b = make::(1); - - assert_eq!(send(Some(&a), Some(&b)), 2); - - let av = a.recv().unwrap(); - let bv = b.recv().unwrap(); - assert_eq!(av | bv, 3); - - assert_eq!(send(Some(&a), None), 1); - assert_eq!(counter.get(), 10); - } -} - -// https://github.com/golang/go/blob/master/test/chan/select2.go -mod select2 { - // TODO -} - -// https://github.com/golang/go/blob/master/test/chan/select3.go -mod select3 { - // TODO -} - -// https://github.com/golang/go/blob/master/test/chan/select4.go -mod select4 { - use super::*; - - #[test] - fn main() { - let c = make::(1); - let c1 = make::(0); - c.send(42); - select! { - recv(c1.rx()) -> _ => panic!("BUG"), - recv(c.rx()) -> v => assert_eq!(v, Ok(42)), - } - } -} - -// https://github.com/golang/go/blob/master/test/chan/select6.go -mod select6 { - use super::*; - - #[test] - fn main() { - let c1 = make::(0); - let c2 = make::(0); - let c3 = make::(0); - - go!(c1, c1.recv()); - go!(c1, c2, c3, { - select! { - recv(c1.rx()) -> _ => panic!("dummy"), - recv(c2.rx()) -> _ => c3.send(true), - } - c1.recv(); - }); - go!(c2, c2.send(true)); - - c3.recv(); - c1.send(true); - c1.send(true); - } -} - -// https://github.com/golang/go/blob/master/test/chan/select7.go -mod select7 { - use super::*; - - fn recv1(c: Chan) { - c.recv().unwrap(); - } - - fn recv2(c: Chan) { - select! { - recv(c.rx()) -> _ => () - } - } - - fn recv3(c: Chan) { - let c2 = make::(1); - select! { - recv(c.rx()) -> _ => (), - recv(c2.rx()) -> _ => () - } - } - - fn send1(recv: fn(Chan)) { - let c = make::(1); - go!(c, recv(c)); - thread::yield_now(); - c.send(1); - } - - fn send2(recv: fn(Chan)) { - let c = make::(1); - go!(c, recv(c)); - thread::yield_now(); - select! { - send(c.tx(), 1) -> _ => () - } - } - - fn send3(recv: fn(Chan)) { - let c = make::(1); - go!(c, recv(c)); - thread::yield_now(); - let c2 = make::(1); - select! 
{ - send(c.tx(), 1) -> _ => (), - send(c2.tx(), 1) -> _ => () - } - } - - #[test] - fn main() { - send1(recv1); - send2(recv1); - send3(recv1); - send1(recv2); - send2(recv2); - send3(recv2); - send1(recv3); - send2(recv3); - send3(recv3); - } -} - -// https://github.com/golang/go/blob/master/test/chan/sieve1.go -mod sieve1 { - use super::*; - - fn generate(ch: Chan) { - let mut i = 2; - loop { - ch.send(i); - i += 1; - } - } - - fn filter(in_ch: Chan, out_ch: Chan, prime: i32) { - for i in in_ch { - if i % prime != 0 { - out_ch.send(i); - } - } - } - - fn sieve(primes: Chan) { - let mut ch = make::(1); - go!(ch, generate(ch)); - loop { - let prime = ch.recv().unwrap(); - primes.send(prime); - - let ch1 = make::(1); - go!(ch, ch1, prime, filter(ch, ch1, prime)); - ch = ch1; - } - } - - #[test] - fn main() { - let primes = make::(1); - go!(primes, sieve(primes)); - - let a = [ - 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, - ]; - for item in a.iter() { - let x = primes.recv().unwrap(); - if x != *item { - println!("{} != {}", x, item); - panic!("fail"); - } - } - } -} - -// https://github.com/golang/go/blob/master/test/chan/zerosize.go -mod zerosize { - use super::*; - - #[test] - fn zero_size_struct() { - struct ZeroSize; - let _ = make::(0); - } - - #[test] - fn zero_size_array() { - let _ = make::<[u8; 0]>(0); - } -} - -// https://github.com/golang/go/blob/master/src/runtime/chan_test.go -mod chan_test { - use super::*; - - #[test] - fn test_chan() { - const N: i32 = 200; - - for cap in 0..N { - { - // Ensure that receive from empty chan blocks. - let c = make::(cap as usize); - - let recv1 = Arc::new(Mutex::new(false)); - go!(c, recv1, { - c.recv(); - *recv1.lock().unwrap() = true; - }); - - let recv2 = Arc::new(Mutex::new(false)); - go!(c, recv2, { - c.recv(); - *recv2.lock().unwrap() = true; - }); - - thread::sleep(ms(1)); - - if *recv1.lock().unwrap() || *recv2.lock().unwrap() { - panic!(); - } - - // Ensure that non-blocking receive does not block. - select! { - recv(c.rx()) -> _ => panic!(), - default => {} - } - select! { - recv(c.rx()) -> _ => panic!(), - default => {} - } - - c.send(0); - c.send(0); - } - - { - // Ensure that send to full chan blocks. - let c = make::(cap as usize); - for i in 0..cap { - c.send(i); - } - - let sent = Arc::new(Mutex::new(0)); - go!(sent, c, { - c.send(0); - *sent.lock().unwrap() = 1; - }); - - thread::sleep(ms(1)); - - if *sent.lock().unwrap() != 0 { - panic!(); - } - - // Ensure that non-blocking send does not block. - select! { - send(c.tx(), 0) -> _ => panic!(), - default => {} - } - c.recv(); - } - - { - // Ensure that we receive 0 from closed chan. - let c = make::(cap as usize); - for i in 0..cap { - c.send(i); - } - c.close(); - - for i in 0..cap { - let v = c.recv(); - if v != Some(i) { - panic!(); - } - } - - if c.recv() != None { - panic!(); - } - if c.try_recv() != None { - panic!(); - } - } - - { - // Ensure that close unblocks receive. - let c = make::(cap as usize); - let done = make::(0); - - go!(c, done, { - let v = c.try_recv(); - done.send(v.is_some()); - }); - - thread::sleep(ms(1)); - c.close(); - - if !done.recv().unwrap() { - // panic!(); - } - } - - { - // Send 100 integers, - // ensure that we receive them non-corrupted in FIFO order. - let c = make::(cap as usize); - go!(c, { - for i in 0..100 { - c.send(i); - } - }); - for i in 0..100 { - if c.recv() != Some(i) { - panic!(); - } - } - - // Same, but using recv2. 
- go!(c, { - for i in 0..100 { - c.send(i); - } - }); - for i in 0..100 { - if c.recv() != Some(i) { - panic!(); - } - } - } - } - } - - #[test] - fn test_nonblock_recv_race() { - const N: usize = 1000; - - for _ in 0..N { - let c = make::(1); - c.send(1); - - let t = go!(c, { - select! { - recv(c.rx()) -> _ => {} - default => panic!("chan is not ready"), - } - }); - - c.close(); - c.recv(); - t.join().unwrap(); - } - } - - #[test] - fn test_nonblock_select_race() { - const N: usize = 1000; - - let done = make::(1); - for _ in 0..N { - let c1 = make::(1); - let c2 = make::(1); - c1.send(1); - - go!(c1, c2, done, { - select! { - recv(c1.rx()) -> _ => {} - recv(c2.rx()) -> _ => {} - default => { - done.send(false); - return; - } - } - done.send(true); - }); - - c2.send(1); - select! { - recv(c1.rx()) -> _ => {} - default => {} - } - if !done.recv().unwrap() { - panic!("no chan is ready"); - } - } - } - - #[test] - fn test_nonblock_select_race2() { - const N: usize = 1000; - - let done = make::(1); - for _ in 0..N { - let c1 = make::(1); - let c2 = make::(0); - c1.send(1); - - go!(c1, c2, done, { - select! { - recv(c1.rx()) -> _ => {} - recv(c2.rx()) -> _ => {} - default => { - done.send(false); - return; - } - } - done.send(true); - }); - - c2.close(); - select! { - recv(c1.rx()) -> _ => {} - default => {} - } - if !done.recv().unwrap() { - panic!("no chan is ready"); - } - } - } - - #[test] - fn test_self_select() { - // Ensure that send/recv on the same chan in select - // does not crash nor deadlock. - - for &cap in &[0, 10] { - let wg = WaitGroup::new(); - wg.add(2); - let c = make::(cap); - - for p in 0..2 { - let p = p; - go!(wg, p, c, { - defer! { wg.done() } - for i in 0..1000 { - if p == 0 || i % 2 == 0 { - select! { - send(c.tx(), p) -> _ => {} - recv(c.rx()) -> v => { - if cap == 0 && v.ok() == Some(p) { - panic!("self receive"); - } - } - } - } else { - select! { - recv(c.rx()) -> v => { - if cap == 0 && v.ok() == Some(p) { - panic!("self receive"); - } - } - send(c.tx(), p) -> _ => {} - } - } - } - }); - } - wg.wait(); - } - } - - #[test] - fn test_select_stress() { - let c = vec![ - make::(0), - make::(0), - make::(2), - make::(3), - ]; - - const N: usize = 10000; - - // There are 4 goroutines that send N values on each of the chans, - // + 4 goroutines that receive N values on each of the chans, - // + 1 goroutine that sends N values on each of the chans in a single select, - // + 1 goroutine that receives N values on each of the chans in a single select. - // All these sends, receives and selects interact chaotically at runtime, - // but we are careful that this whole construct does not deadlock. 
- let wg = WaitGroup::new(); - wg.add(10); - - for k in 0..4 { - go!(k, c, wg, { - for _ in 0..N { - c[k].send(0); - } - wg.done(); - }); - go!(k, c, wg, { - for _ in 0..N { - c[k].recv(); - } - wg.done(); - }); - } - - go!(c, wg, { - let mut n = [0; 4]; - let mut c1 = c.iter().map(|c| Some(c.rx().clone())).collect::>(); - - for _ in 0..4 * N { - let index = { - let mut sel = Select::new(); - let mut opers = [!0; 4]; - for &i in &[3, 2, 0, 1] { - if let Some(c) = &c1[i] { - opers[i] = sel.recv(c); - } - } - - let oper = sel.select(); - let mut index = !0; - for i in 0..4 { - if opers[i] == oper.index() { - index = i; - let _ = oper.recv(c1[i].as_ref().unwrap()); - break; - } - } - index - }; - - n[index] += 1; - if n[index] == N { - c1[index] = None; - } - } - wg.done(); - }); - - go!(c, wg, { - let mut n = [0; 4]; - let mut c1 = c.iter().map(|c| Some(c.tx().clone())).collect::>(); - - for _ in 0..4 * N { - let index = { - let mut sel = Select::new(); - let mut opers = [!0; 4]; - for &i in &[0, 1, 2, 3] { - if let Some(c) = &c1[i] { - opers[i] = sel.send(c); - } - } - - let oper = sel.select(); - let mut index = !0; - for i in 0..4 { - if opers[i] == oper.index() { - index = i; - let _ = oper.send(c1[i].as_ref().unwrap(), 0); - break; - } - } - index - }; - - n[index] += 1; - if n[index] == N { - c1[index] = None; - } - } - wg.done(); - }); - - wg.wait(); - } - - #[test] - fn test_select_fairness() { - const TRIALS: usize = 10000; - - let c1 = make::(TRIALS + 1); - let c2 = make::(TRIALS + 1); - - for _ in 0..TRIALS + 1 { - c1.send(1); - c2.send(2); - } - - let c3 = make::(0); - let c4 = make::(0); - let out = make::(0); - let done = make::(0); - let wg = WaitGroup::new(); - - wg.add(1); - go!(wg, c1, c2, c3, c4, out, done, { - defer! { wg.done() }; - loop { - let b; - select! { - recv(c3.rx()) -> m => b = m.unwrap(), - recv(c4.rx()) -> m => b = m.unwrap(), - recv(c1.rx()) -> m => b = m.unwrap(), - recv(c2.rx()) -> m => b = m.unwrap(), - } - select! { - send(out.tx(), b) -> _ => {} - recv(done.rx()) -> _ => return, - } - } - }); - - let (mut cnt1, mut cnt2) = (0, 0); - for _ in 0..TRIALS { - match out.recv() { - Some(1) => cnt1 += 1, - Some(2) => cnt2 += 1, - b => panic!("unexpected value {:?} on channel", b), - } - } - - // If the select in the goroutine is fair, - // cnt1 and cnt2 should be about the same value. - // With 10,000 trials, the expected margin of error at - // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)). - - let r = cnt1 as f64 / TRIALS as f64; - let e = (r - 0.5).abs(); - - if e > 4.4172 / (2.0 * (TRIALS as f64).sqrt()) { - panic!( - "unfair select: in {} trials, results were {}, {}", - TRIALS, cnt1, cnt2, - ); - } - - done.close(); - wg.wait(); - } - - #[test] - fn test_chan_send_interface() { - struct Mt; - - let c = make::>(1); - c.send(Box::new(Mt)); - - select! { - send(c.tx(), Box::new(Mt)) -> _ => {} - default => {} - } - - select! { - send(c.tx(), Box::new(Mt)) -> _ => {} - send(c.tx(), Box::new(Mt)) -> _ => {} - default => {} - } - } - - #[test] - fn test_pseudo_random_send() { - const N: usize = 100; - - for cap in 0..N { - let c = make::(cap); - let l = Arc::new(Mutex::new(vec![0i32; N])); - let done = make::(0); - - go!(c, done, l, { - let mut l = l.lock().unwrap(); - for i in 0..N { - thread::yield_now(); - l[i] = c.recv().unwrap(); - } - done.send(true); - }); - - for _ in 0..N { - select! 
{ - send(c.tx(), 1) -> _ => {} - send(c.tx(), 0) -> _ => {} - } - } - done.recv(); - - let mut n0 = 0; - let mut n1 = 0; - for &i in l.lock().unwrap().iter() { - n0 += (i + 1) % 2; - n1 += i; - } - - if n0 <= N as i32 / 10 || n1 <= N as i32 / 10 { - panic!( - "Want pseudorandom, got {} zeros and {} ones (chan cap {})", - n0, n1, cap, - ); - } - } - } - - #[test] - fn test_multi_consumer() { - const NWORK: usize = 23; - const NITER: usize = 271828; - - let pn = [2, 3, 7, 11, 13, 17, 19, 23, 27, 31]; - - let q = make::(NWORK * 3); - let r = make::(NWORK * 3); - - let wg = WaitGroup::new(); - for i in 0..NWORK { - wg.add(1); - let w = i; - go!(q, r, wg, pn, { - for v in &q { - if pn[w % pn.len()] == v { - thread::yield_now(); - } - r.send(v); - } - wg.done(); - }); - } - - let expect = Arc::new(Mutex::new(0)); - go!(q, r, expect, wg, pn, { - for i in 0..NITER { - let v = pn[i % pn.len()]; - *expect.lock().unwrap() += v; - q.send(v); - } - q.close(); - wg.wait(); - r.close(); - }); - - let mut n = 0; - let mut s = 0; - for v in &r { - n += 1; - s += v; - } - - if n != NITER || s != *expect.lock().unwrap() { - panic!(); - } - } - - #[test] - fn test_select_duplicate_channel() { - // This test makes sure we can queue a G on - // the same channel multiple times. - let c = make::(0); - let d = make::(0); - let e = make::(0); - - go!(c, d, e, { - select! { - recv(c.rx()) -> _ => {} - recv(d.rx()) -> _ => {} - recv(e.rx()) -> _ => {} - } - e.send(9); - }); - thread::sleep(ms(1)); - - go!(c, c.recv()); - thread::sleep(ms(1)); - - d.send(7); - e.recv(); - c.send(8); - } -} - -// https://github.com/golang/go/blob/master/test/closedchan.go -mod closedchan { - // TODO -} - -// https://github.com/golang/go/blob/master/src/runtime/chanbarrier_test.go -mod chanbarrier_test { - // TODO -} - -// https://github.com/golang/go/blob/master/src/runtime/race/testdata/chan_test.go -mod race_chan_test { - // TODO -} - -// https://github.com/golang/go/blob/master/test/ken/chan.go -mod chan { - // TODO -} - -// https://github.com/golang/go/blob/master/test/ken/chan1.go -mod chan1 { - // TODO -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/iter.rs cargo-0.47.0/vendor/crossbeam-channel/tests/iter.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/iter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/iter.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ -//! Tests for iteration over receivers. 
-
-extern crate crossbeam_channel;
-extern crate crossbeam_utils;
-
-use crossbeam_channel::unbounded;
-use crossbeam_utils::thread::scope;
-
-#[test]
-fn nested_recv_iter() {
-    let (s, r) = unbounded::<i32>();
-    let (total_s, total_r) = unbounded::<i32>();
-
-    scope(|scope| {
-        scope.spawn(move |_| {
-            let mut acc = 0;
-            for x in r.iter() {
-                acc += x;
-            }
-            total_s.send(acc).unwrap();
-        });
-
-        s.send(3).unwrap();
-        s.send(1).unwrap();
-        s.send(2).unwrap();
-        drop(s);
-        assert_eq!(total_r.recv().unwrap(), 6);
-    })
-    .unwrap();
-}
-
-#[test]
-fn recv_iter_break() {
-    let (s, r) = unbounded::<i32>();
-    let (count_s, count_r) = unbounded();
-
-    scope(|scope| {
-        scope.spawn(move |_| {
-            let mut count = 0;
-            for x in r.iter() {
-                if count >= 3 {
-                    break;
-                } else {
-                    count += x;
-                }
-            }
-            count_s.send(count).unwrap();
-        });
-
-        s.send(2).unwrap();
-        s.send(2).unwrap();
-        s.send(2).unwrap();
-        let _ = s.send(2);
-        drop(s);
-        assert_eq!(count_r.recv().unwrap(), 4);
-    })
-    .unwrap();
-}
-
-#[test]
-fn recv_try_iter() {
-    let (request_s, request_r) = unbounded();
-    let (response_s, response_r) = unbounded();
-
-    scope(|scope| {
-        scope.spawn(move |_| {
-            let mut count = 0;
-            loop {
-                for x in response_r.try_iter() {
-                    count += x;
-                    if count == 6 {
-                        return;
-                    }
-                }
-                request_s.send(()).unwrap();
-            }
-        });
-
-        for _ in request_r.iter() {
-            if response_s.send(2).is_err() {
-                break;
-            }
-        }
-    })
-    .unwrap();
-}
-
-#[test]
-fn recv_into_iter_owned() {
-    let mut iter = {
-        let (s, r) = unbounded::<i32>();
-        s.send(1).unwrap();
-        s.send(2).unwrap();
-        r.into_iter()
-    };
-
-    assert_eq!(iter.next().unwrap(), 1);
-    assert_eq!(iter.next().unwrap(), 2);
-    assert_eq!(iter.next().is_none(), true);
-}
-
-#[test]
-fn recv_into_iter_borrowed() {
-    let (s, r) = unbounded::<i32>();
-    s.send(1).unwrap();
-    s.send(2).unwrap();
-    drop(s);
-
-    let mut iter = (&r).into_iter();
-    assert_eq!(iter.next().unwrap(), 1);
-    assert_eq!(iter.next().unwrap(), 2);
-    assert_eq!(iter.next().is_none(), true);
-}
diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/list.rs cargo-0.47.0/vendor/crossbeam-channel/tests/list.rs
--- cargo-0.44.1/vendor/crossbeam-channel/tests/list.rs 2020-05-27 21:15:58.000000000 +0000
+++ cargo-0.47.0/vendor/crossbeam-channel/tests/list.rs 1970-01-01 00:00:00.000000000 +0000
@@ -1,538 +0,0 @@
-//! Tests for the list channel flavor.
- -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; -extern crate rand; - -use std::any::Any; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{unbounded, Receiver}; -use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; -use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - let (s, r) = unbounded(); - s.try_send(7).unwrap(); - assert_eq!(r.try_recv(), Ok(7)); - - s.send(8).unwrap(); - assert_eq!(r.recv(), Ok(8)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); -} - -#[test] -fn capacity() { - let (s, r) = unbounded::<()>(); - assert_eq!(s.capacity(), None); - assert_eq!(r.capacity(), None); -} - -#[test] -fn len_empty_full() { - let (s, r) = unbounded(); - - assert_eq!(s.len(), 0); - assert_eq!(s.is_empty(), true); - assert_eq!(s.is_full(), false); - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); - - s.send(()).unwrap(); - - assert_eq!(s.len(), 1); - assert_eq!(s.is_empty(), false); - assert_eq!(s.is_full(), false); - assert_eq!(r.len(), 1); - assert_eq!(r.is_empty(), false); - assert_eq!(r.is_full(), false); - - r.recv().unwrap(); - - assert_eq!(s.len(), 0); - assert_eq!(s.is_empty(), true); - assert_eq!(s.is_full(), false); - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); -} - -#[test] -fn try_recv() { - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(1500)); - assert_eq!(r.try_recv(), Ok(7)); - thread::sleep(ms(500)); - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Ok(7)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(9)); - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - s.send(8).unwrap(); - s.send(9).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = unbounded::(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); - assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); - assert_eq!( - r.recv_timeout(ms(1000)), - Err(RecvTimeoutError::Disconnected) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = unbounded(); - for i in 0..1000 { - assert_eq!(s.try_send(i), Ok(())); - } - - drop(r); - assert_eq!(s.try_send(777), Err(TrySendError::Disconnected(777))); -} - -#[test] -fn send() { - let (s, r) = unbounded(); - for i in 0..1000 { - assert_eq!(s.send(i), Ok(())); - } - - drop(r); - assert_eq!(s.send(777), Err(SendError(777))); -} - -#[test] -fn send_timeout() { - let (s, r) = unbounded(); - for i in 0..1000 { - assert_eq!(s.send_timeout(i, ms(i as u64)), Ok(())); - } - - drop(r); - assert_eq!( - s.send_timeout(777, ms(0)), - Err(SendTimeoutError::Disconnected(777)) - 
); -} - -#[test] -fn send_after_disconnect() { - let (s, r) = unbounded(); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(r); - - assert_eq!(s.send(4), Err(SendError(4))); - assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5))); - assert_eq!( - s.send_timeout(6, ms(0)), - Err(SendTimeoutError::Disconnected(6)) - ); -} - -#[test] -fn recv_after_disconnect() { - let (s, r) = unbounded(); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(s); - - assert_eq!(r.recv(), Ok(1)); - assert_eq!(r.recv(), Ok(2)); - assert_eq!(r.recv(), Ok(3)); - assert_eq!(r.recv(), Err(RecvError)); -} - -#[test] -fn len() { - let (s, r) = unbounded(); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - for i in 0..50 { - s.send(i).unwrap(); - assert_eq!(s.len(), i + 1); - } - - for i in 0..50 { - r.recv().unwrap(); - assert_eq!(r.len(), 50 - i - 1); - } - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn spsc() { - const COUNT: usize = 100_000; - - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - } - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = unbounded::(); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = r.recv().unwrap(); - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn stress_oneshot() { - const COUNT: usize = 10_000; - - for _ in 0..COUNT { - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(|_| r.recv().unwrap()); - scope.spawn(|_| s.send(0).unwrap()); - }) - .unwrap(); - } -} - -#[test] -fn stress_iter() { - const COUNT: usize = 100_000; - - let (request_s, request_r) = unbounded(); - let (response_s, response_r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == COUNT { - return; - } - } - request_s.send(()).unwrap(); - } - }); - - for _ in request_r.iter() { - if response_s.send(1).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 100; - - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - s.send(i).unwrap(); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(x) = r.recv_timeout(ms(10)) { - assert_eq!(x, i); - break; - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn drops() { - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - 
let mut rng = thread_rng(); - - for _ in 0..100 { - let steps = rng.gen_range(0, 10_000); - let additional = rng.gen_range(0, 1000); - - DROPS.store(0, Ordering::SeqCst); - let (s, r) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - r.recv().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - s.send(DropCounter).unwrap(); - } - }); - }) - .unwrap(); - - for _ in 0..additional { - s.try_send(DropCounter).unwrap(); - } - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(s); - drop(r); - assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); - } -} - -#[test] -fn linearizable() { - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = unbounded(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - s.send(0).unwrap(); - r.try_recv().unwrap(); - } - }); - } - }) - .unwrap(); -} - -#[test] -fn fairness() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 10_000; - - let (s, r) = unbounded(); - - for _ in 0..COUNT { - s.send(()).unwrap(); - } - - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn recv_in_send() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - select! { - send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {} - } -} - -#[test] -fn channel_through_channel() { - const COUNT: usize = 1000; - - type T = Box; - - let (s, r) = unbounded::(); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = unbounded(); - let new_r: T = Box::new(Some(new_r)); - - s.send(new_r).unwrap(); - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = r - .recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - }); - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/mpsc.rs cargo-0.47.0/vendor/crossbeam-channel/tests/mpsc.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/mpsc.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/mpsc.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2095 +0,0 @@ -//! Tests copied from `std::sync::mpsc`. -//! -//! This is a copy of tests for the `std::sync::mpsc` channels from the standard library, but -//! modified to work with `crossbeam-channel` instead. -//! -//! Minor tweaks were needed to make the tests compile: -//! -//! - Replace `box` syntax with `Box::new`. -//! - Replace all uses of `Select` with `select!`. -//! - Change the imports. -//! - Join all spawned threads. -//! - Removed assertion from oneshot_multi_thread_send_close_stress tests. -//! -//! Source: -//! - https://github.com/rust-lang/rust/tree/master/src/libstd/sync/mpsc -//! -//! Copyright & License: -//! - Copyright 2013-2014 The Rust Project Developers -//! - Apache License, Version 2.0 or MIT license, at your option -//! 
- https://github.com/rust-lang/rust/blob/master/COPYRIGHT -//! - https://www.rust-lang.org/en-US/legal.html - -#[macro_use] -extern crate crossbeam_channel as cc; - -use std::sync::mpsc::{RecvError, RecvTimeoutError, TryRecvError}; -use std::sync::mpsc::{SendError, TrySendError}; -use std::thread::JoinHandle; -use std::time::Duration; - -pub struct Sender { - pub inner: cc::Sender, -} - -impl Sender { - pub fn send(&self, t: T) -> Result<(), SendError> { - self.inner.send(t).map_err(|cc::SendError(m)| SendError(m)) - } -} - -impl Clone for Sender { - fn clone(&self) -> Sender { - Sender { - inner: self.inner.clone(), - } - } -} - -pub struct SyncSender { - pub inner: cc::Sender, -} - -impl SyncSender { - pub fn send(&self, t: T) -> Result<(), SendError> { - self.inner.send(t).map_err(|cc::SendError(m)| SendError(m)) - } - - pub fn try_send(&self, t: T) -> Result<(), TrySendError> { - self.inner.try_send(t).map_err(|err| match err { - cc::TrySendError::Full(m) => TrySendError::Full(m), - cc::TrySendError::Disconnected(m) => TrySendError::Disconnected(m), - }) - } -} - -impl Clone for SyncSender { - fn clone(&self) -> SyncSender { - SyncSender { - inner: self.inner.clone(), - } - } -} - -pub struct Receiver { - pub inner: cc::Receiver, -} - -impl Receiver { - pub fn try_recv(&self) -> Result { - self.inner.try_recv().map_err(|err| match err { - cc::TryRecvError::Empty => TryRecvError::Empty, - cc::TryRecvError::Disconnected => TryRecvError::Disconnected, - }) - } - - pub fn recv(&self) -> Result { - self.inner.recv().map_err(|_| RecvError) - } - - pub fn recv_timeout(&self, timeout: Duration) -> Result { - self.inner.recv_timeout(timeout).map_err(|err| match err { - cc::RecvTimeoutError::Timeout => RecvTimeoutError::Timeout, - cc::RecvTimeoutError::Disconnected => RecvTimeoutError::Disconnected, - }) - } - - pub fn iter(&self) -> Iter { - Iter { inner: self } - } - - pub fn try_iter(&self) -> TryIter { - TryIter { inner: self } - } -} - -impl<'a, T> IntoIterator for &'a Receiver { - type Item = T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.iter() - } -} - -impl IntoIterator for Receiver { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - IntoIter { inner: self } - } -} - -pub struct TryIter<'a, T: 'a> { - inner: &'a Receiver, -} - -impl<'a, T> Iterator for TryIter<'a, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.try_recv().ok() - } -} - -pub struct Iter<'a, T: 'a> { - inner: &'a Receiver, -} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.recv().ok() - } -} - -pub struct IntoIter { - inner: Receiver, -} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.recv().ok() - } -} - -pub fn channel() -> (Sender, Receiver) { - let (s, r) = cc::unbounded(); - let s = Sender { inner: s }; - let r = Receiver { inner: r }; - (s, r) -} - -pub fn sync_channel(bound: usize) -> (SyncSender, Receiver) { - let (s, r) = cc::bounded(bound); - let s = SyncSender { inner: s }; - let r = Receiver { inner: r }; - (s, r) -} - -macro_rules! select { - ( - $($name:pat = $rx:ident.$meth:ident() => $code:expr),+ - ) => ({ - crossbeam_channel_internal! 
{ - $( - recv(($rx).inner) -> res => { - let $name = res.map_err(|_| ::std::sync::mpsc::RecvError); - $code - } - )+ - } - }) -} - -// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/mod.rs -mod channel_tests { - use super::*; - - use std::env; - use std::thread; - use std::time::{Duration, Instant}; - - pub fn stress_factor() -> usize { - match env::var("RUST_TEST_STRESS") { - Ok(val) => val.parse().unwrap(), - Err(..) => 1, - } - } - - #[test] - fn smoke() { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn drop_full() { - let (tx, _rx) = channel::>(); - tx.send(Box::new(1)).unwrap(); - } - - #[test] - fn drop_full_shared() { - let (tx, _rx) = channel::>(); - drop(tx.clone()); - drop(tx.clone()); - tx.send(Box::new(1)).unwrap(); - } - - #[test] - fn smoke_shared() { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - let tx = tx.clone(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn smoke_threads() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - }); - assert_eq!(rx.recv().unwrap(), 1); - t.join().unwrap(); - } - - #[test] - fn smoke_port_gone() { - let (tx, rx) = channel::(); - drop(rx); - assert!(tx.send(1).is_err()); - } - - #[test] - fn smoke_shared_port_gone() { - let (tx, rx) = channel::(); - drop(rx); - assert!(tx.send(1).is_err()) - } - - #[test] - fn smoke_shared_port_gone2() { - let (tx, rx) = channel::(); - drop(rx); - let tx2 = tx.clone(); - drop(tx); - assert!(tx2.send(1).is_err()); - } - - #[test] - fn port_gone_concurrent() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn port_gone_concurrent_shared() { - let (tx, rx) = channel::(); - let tx2 = tx.clone(); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() && tx2.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn smoke_chan_gone() { - let (tx, rx) = channel::(); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn smoke_chan_gone_shared() { - let (tx, rx) = channel::<()>(); - let tx2 = tx.clone(); - drop(tx); - drop(tx2); - assert!(rx.recv().is_err()); - } - - #[test] - fn chan_gone_concurrent() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - tx.send(1).unwrap(); - }); - while rx.recv().is_ok() {} - t.join().unwrap(); - } - - #[test] - fn stress() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - for _ in 0..10000 { - tx.send(1).unwrap(); - } - }); - for _ in 0..10000 { - assert_eq!(rx.recv().unwrap(), 1); - } - t.join().ok().unwrap(); - } - - #[test] - fn stress_shared() { - const AMT: u32 = 10000; - const NTHREADS: u32 = 8; - let (tx, rx) = channel::(); - - let t = thread::spawn(move || { - for _ in 0..AMT * NTHREADS { - assert_eq!(rx.recv().unwrap(), 1); - } - match rx.try_recv() { - Ok(..) 
=> panic!(), - _ => {} - } - }); - - let mut ts = Vec::with_capacity(NTHREADS as usize); - for _ in 0..NTHREADS { - let tx = tx.clone(); - let t = thread::spawn(move || { - for _ in 0..AMT { - tx.send(1).unwrap(); - } - }); - ts.push(t); - } - drop(tx); - t.join().ok().unwrap(); - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn send_from_outside_runtime() { - let (tx1, rx1) = channel::<()>(); - let (tx2, rx2) = channel::(); - let t1 = thread::spawn(move || { - tx1.send(()).unwrap(); - for _ in 0..40 { - assert_eq!(rx2.recv().unwrap(), 1); - } - }); - rx1.recv().unwrap(); - let t2 = thread::spawn(move || { - for _ in 0..40 { - tx2.send(1).unwrap(); - } - }); - t1.join().ok().unwrap(); - t2.join().ok().unwrap(); - } - - #[test] - fn recv_from_outside_runtime() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - for _ in 0..40 { - assert_eq!(rx.recv().unwrap(), 1); - } - }); - for _ in 0..40 { - tx.send(1).unwrap(); - } - t.join().ok().unwrap(); - } - - #[test] - fn no_runtime() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - let t1 = thread::spawn(move || { - assert_eq!(rx1.recv().unwrap(), 1); - tx2.send(2).unwrap(); - }); - let t2 = thread::spawn(move || { - tx1.send(1).unwrap(); - assert_eq!(rx2.recv().unwrap(), 2); - }); - t1.join().ok().unwrap(); - t2.join().ok().unwrap(); - } - - #[test] - fn oneshot_single_thread_close_port_first() { - // Simple test of closing without sending - let (_tx, rx) = channel::(); - drop(rx); - } - - #[test] - fn oneshot_single_thread_close_chan_first() { - // Simple test of closing without sending - let (tx, _rx) = channel::(); - drop(tx); - } - - #[test] - fn oneshot_single_thread_send_port_close() { - // Testing that the sender cleans up the payload if receiver is closed - let (tx, rx) = channel::>(); - drop(rx); - assert!(tx.send(Box::new(0)).is_err()); - } - - #[test] - fn oneshot_single_thread_recv_chan_close() { - let (tx, rx) = channel::(); - drop(tx); - assert_eq!(rx.recv(), Err(RecvError)); - } - - #[test] - fn oneshot_single_thread_send_then_recv() { - let (tx, rx) = channel::>(); - tx.send(Box::new(10)).unwrap(); - assert!(*rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_open() { - let (tx, rx) = channel::(); - assert!(tx.send(10).is_ok()); - assert!(rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_closed() { - let (tx, rx) = channel::(); - drop(rx); - assert!(tx.send(10).is_err()); - } - - #[test] - fn oneshot_single_thread_try_recv_open() { - let (tx, rx) = channel::(); - tx.send(10).unwrap(); - assert!(rx.recv() == Ok(10)); - } - - #[test] - fn oneshot_single_thread_try_recv_closed() { - let (tx, rx) = channel::(); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn oneshot_single_thread_peek_data() { - let (tx, rx) = channel::(); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - tx.send(10).unwrap(); - assert_eq!(rx.try_recv(), Ok(10)); - } - - #[test] - fn oneshot_single_thread_peek_close() { - let (tx, rx) = channel::(); - drop(tx); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - } - - #[test] - fn oneshot_single_thread_peek_open() { - let (_tx, rx) = channel::(); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[test] - fn oneshot_multi_task_recv_then_send() { - let (tx, rx) = channel::>(); - let t = thread::spawn(move || { - assert!(*rx.recv().unwrap() == 10); - }); - - tx.send(Box::new(10)).unwrap(); - t.join().unwrap(); 
- } - - #[test] - fn oneshot_multi_task_recv_then_close() { - let (tx, rx) = channel::>(); - let t = thread::spawn(move || { - drop(tx); - }); - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_thread_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - drop(tx); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - thread::spawn(move || { - let _ = tx.send(1); - }) - .join() - .unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_recv_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - }); - ts.push(t); - let t2 = thread::spawn(move || { - let t = thread::spawn(move || { - drop(tx); - }); - t.join().unwrap(); - }); - ts.push(t2); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::>(); - let t = thread::spawn(move || { - tx.send(Box::new(10)).unwrap(); - }); - ts.push(t); - assert!(*rx.recv().unwrap() == 10); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn stream_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel(); - - if let Some(t) = send(tx, 0) { - ts.push(t); - } - if let Some(t2) = recv(rx, 0) { - ts.push(t2); - } - - fn send(tx: Sender>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - tx.send(Box::new(i)).unwrap(); - send(tx, i + 1); - })) - } - - fn recv(rx: Receiver>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - assert!(*rx.recv().unwrap() == i); - recv(rx, i + 1); - })) - } - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_single_thread_recv_timeout() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); - assert_eq!( - rx.recv_timeout(Duration::from_millis(1)), - Err(RecvTimeoutError::Timeout) - ); - tx.send(()).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); - } - - #[test] - fn stress_recv_timeout_two_threads() { - let (tx, rx) = channel(); - let stress = stress_factor() + 100; - let timeout = Duration::from_millis(100); - - let t = thread::spawn(move || { - for i in 0..stress { - if i % 2 == 0 { - thread::sleep(timeout * 2); - } - tx.send(1usize).unwrap(); - } - }); - - let mut recv_count = 0; - loop { - match rx.recv_timeout(timeout) { - Ok(n) => { - assert_eq!(n, 1usize); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - 
assert_eq!(recv_count, stress); - t.join().unwrap() - } - - #[test] - fn recv_timeout_upgrade() { - let (tx, rx) = channel::<()>(); - let timeout = Duration::from_millis(1); - let _tx_clone = tx.clone(); - - let start = Instant::now(); - assert_eq!(rx.recv_timeout(timeout), Err(RecvTimeoutError::Timeout)); - assert!(Instant::now() >= start + timeout); - } - - #[test] - fn stress_recv_timeout_shared() { - let (tx, rx) = channel(); - let stress = stress_factor() + 100; - - let mut ts = Vec::with_capacity(stress); - for i in 0..stress { - let tx = tx.clone(); - let t = thread::spawn(move || { - thread::sleep(Duration::from_millis(i as u64 * 10)); - tx.send(1usize).unwrap(); - }); - ts.push(t); - } - - drop(tx); - - let mut recv_count = 0; - loop { - match rx.recv_timeout(Duration::from_millis(10)) { - Ok(n) => { - assert_eq!(n, 1usize); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, stress); - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn recv_a_lot() { - // Regression test that we don't run out of stack in scheduler context - let (tx, rx) = channel(); - for _ in 0..10000 { - tx.send(()).unwrap(); - } - for _ in 0..10000 { - rx.recv().unwrap(); - } - } - - #[test] - fn shared_recv_timeout() { - let (tx, rx) = channel(); - let total = 5; - let mut ts = Vec::with_capacity(total); - for _ in 0..total { - let tx = tx.clone(); - let t = thread::spawn(move || { - tx.send(()).unwrap(); - }); - ts.push(t); - } - - for _ in 0..total { - rx.recv().unwrap(); - } - - assert_eq!( - rx.recv_timeout(Duration::from_millis(1)), - Err(RecvTimeoutError::Timeout) - ); - tx.send(()).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn shared_chan_stress() { - let (tx, rx) = channel(); - let total = stress_factor() + 100; - let mut ts = Vec::with_capacity(total); - for _ in 0..total { - let tx = tx.clone(); - let t = thread::spawn(move || { - tx.send(()).unwrap(); - }); - ts.push(t); - } - - for _ in 0..total { - rx.recv().unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn test_nested_recv_iter() { - let (tx, rx) = channel::(); - let (total_tx, total_rx) = channel::(); - - let t = thread::spawn(move || { - let mut acc = 0; - for x in rx.iter() { - acc += x; - } - total_tx.send(acc).unwrap(); - }); - - tx.send(3).unwrap(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - drop(tx); - assert_eq!(total_rx.recv().unwrap(), 6); - t.join().unwrap(); - } - - #[test] - fn test_recv_iter_break() { - let (tx, rx) = channel::(); - let (count_tx, count_rx) = channel(); - - let t = thread::spawn(move || { - let mut count = 0; - for x in rx.iter() { - if count >= 3 { - break; - } else { - count += x; - } - } - count_tx.send(count).unwrap(); - }); - - tx.send(2).unwrap(); - tx.send(2).unwrap(); - tx.send(2).unwrap(); - let _ = tx.send(2); - drop(tx); - assert_eq!(count_rx.recv().unwrap(), 4); - t.join().unwrap(); - } - - #[test] - fn test_recv_try_iter() { - let (request_tx, request_rx) = channel(); - let (response_tx, response_rx) = channel(); - - // Request `x`s until we have `6`. 
- let t = thread::spawn(move || { - let mut count = 0; - loop { - for x in response_rx.try_iter() { - count += x; - if count == 6 { - return count; - } - } - request_tx.send(()).unwrap(); - } - }); - - for _ in request_rx.iter() { - if response_tx.send(2).is_err() { - break; - } - } - - assert_eq!(t.join().unwrap(), 6); - } - - #[test] - fn test_recv_into_iter_owned() { - let mut iter = { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - - rx.into_iter() - }; - assert_eq!(iter.next().unwrap(), 1); - assert_eq!(iter.next().unwrap(), 2); - assert_eq!(iter.next().is_none(), true); - } - - #[test] - fn test_recv_into_iter_borrowed() { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - drop(tx); - let mut iter = (&rx).into_iter(); - assert_eq!(iter.next().unwrap(), 1); - assert_eq!(iter.next().unwrap(), 2); - assert_eq!(iter.next().is_none(), true); - } - - #[test] - fn try_recv_states() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::<()>(); - let (tx3, rx3) = channel::<()>(); - let t = thread::spawn(move || { - rx2.recv().unwrap(); - tx1.send(1).unwrap(); - tx3.send(()).unwrap(); - rx2.recv().unwrap(); - drop(tx1); - tx3.send(()).unwrap(); - }); - - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Ok(1)); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected)); - t.join().unwrap(); - } - - // This bug used to end up in a livelock inside of the Receiver destructor - // because the internal state of the Shared packet was corrupted - #[test] - fn destroy_upgraded_shared_port_when_sender_still_active() { - let (tx, rx) = channel(); - let (tx2, rx2) = channel(); - let t = thread::spawn(move || { - rx.recv().unwrap(); // wait on a oneshot - drop(rx); // destroy a shared - tx2.send(()).unwrap(); - }); - // make sure the other thread has gone to sleep - for _ in 0..5000 { - thread::yield_now(); - } - - // upgrade to a shared chan and send a message - let tx2 = tx.clone(); - drop(tx); - tx2.send(()).unwrap(); - - // wait for the child thread to exit before we exit - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn issue_32114() { - let (tx, _) = channel(); - let _ = tx.send(123); - assert_eq!(tx.send(123), Err(SendError(123))); - } -} - -// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/mod.rs -mod sync_channel_tests { - use super::*; - - use std::env; - use std::thread; - use std::time::Duration; - - pub fn stress_factor() -> usize { - match env::var("RUST_TEST_STRESS") { - Ok(val) => val.parse().unwrap(), - Err(..) 
=> 1, - } - } - - #[test] - fn smoke() { - let (tx, rx) = sync_channel::(1); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn drop_full() { - let (tx, _rx) = sync_channel::>(1); - tx.send(Box::new(1)).unwrap(); - } - - #[test] - fn smoke_shared() { - let (tx, rx) = sync_channel::(1); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - let tx = tx.clone(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn recv_timeout() { - let (tx, rx) = sync_channel::(1); - assert_eq!( - rx.recv_timeout(Duration::from_millis(1)), - Err(RecvTimeoutError::Timeout) - ); - tx.send(1).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(1)); - } - - #[test] - fn smoke_threads() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - }); - assert_eq!(rx.recv().unwrap(), 1); - t.join().unwrap(); - } - - #[test] - fn smoke_port_gone() { - let (tx, rx) = sync_channel::(0); - drop(rx); - assert!(tx.send(1).is_err()); - } - - #[test] - fn smoke_shared_port_gone2() { - let (tx, rx) = sync_channel::(0); - drop(rx); - let tx2 = tx.clone(); - drop(tx); - assert!(tx2.send(1).is_err()); - } - - #[test] - fn port_gone_concurrent() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn port_gone_concurrent_shared() { - let (tx, rx) = sync_channel::(0); - let tx2 = tx.clone(); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() && tx2.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn smoke_chan_gone() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn smoke_chan_gone_shared() { - let (tx, rx) = sync_channel::<()>(0); - let tx2 = tx.clone(); - drop(tx); - drop(tx2); - assert!(rx.recv().is_err()); - } - - #[test] - fn chan_gone_concurrent() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - tx.send(1).unwrap(); - }); - while rx.recv().is_ok() {} - t.join().unwrap(); - } - - #[test] - fn stress() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - for _ in 0..10000 { - tx.send(1).unwrap(); - } - }); - for _ in 0..10000 { - assert_eq!(rx.recv().unwrap(), 1); - } - t.join().unwrap(); - } - - #[test] - fn stress_recv_timeout_two_threads() { - let (tx, rx) = sync_channel::(0); - - let t = thread::spawn(move || { - for _ in 0..10000 { - tx.send(1).unwrap(); - } - }); - - let mut recv_count = 0; - loop { - match rx.recv_timeout(Duration::from_millis(1)) { - Ok(v) => { - assert_eq!(v, 1); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, 10000); - t.join().unwrap(); - } - - #[test] - fn stress_recv_timeout_shared() { - const AMT: u32 = 1000; - const NTHREADS: u32 = 8; - let (tx, rx) = sync_channel::(0); - let (dtx, drx) = sync_channel::<()>(0); - - let t = thread::spawn(move || { - let mut recv_count = 0; - loop { - match rx.recv_timeout(Duration::from_millis(10)) { - Ok(v) => { - assert_eq!(v, 1); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, AMT * NTHREADS); - assert!(rx.try_recv().is_err()); - - dtx.send(()).unwrap(); - }); - - let mut ts = Vec::with_capacity(NTHREADS as usize); - for _ in 0..NTHREADS { - let tx = 
tx.clone(); - let t = thread::spawn(move || { - for _ in 0..AMT { - tx.send(1).unwrap(); - } - }); - ts.push(t); - } - - drop(tx); - - drx.recv().unwrap(); - for t in ts { - t.join().unwrap(); - } - t.join().unwrap(); - } - - #[test] - fn stress_shared() { - const AMT: u32 = 1000; - const NTHREADS: u32 = 8; - let (tx, rx) = sync_channel::(0); - let (dtx, drx) = sync_channel::<()>(0); - - let t = thread::spawn(move || { - for _ in 0..AMT * NTHREADS { - assert_eq!(rx.recv().unwrap(), 1); - } - match rx.try_recv() { - Ok(..) => panic!(), - _ => {} - } - dtx.send(()).unwrap(); - }); - - let mut ts = Vec::with_capacity(NTHREADS as usize); - for _ in 0..NTHREADS { - let tx = tx.clone(); - let t = thread::spawn(move || { - for _ in 0..AMT { - tx.send(1).unwrap(); - } - }); - ts.push(t); - } - drop(tx); - drx.recv().unwrap(); - for t in ts { - t.join().unwrap(); - } - t.join().unwrap(); - } - - #[test] - fn oneshot_single_thread_close_port_first() { - // Simple test of closing without sending - let (_tx, rx) = sync_channel::(0); - drop(rx); - } - - #[test] - fn oneshot_single_thread_close_chan_first() { - // Simple test of closing without sending - let (tx, _rx) = sync_channel::(0); - drop(tx); - } - - #[test] - fn oneshot_single_thread_send_port_close() { - // Testing that the sender cleans up the payload if receiver is closed - let (tx, rx) = sync_channel::>(0); - drop(rx); - assert!(tx.send(Box::new(0)).is_err()); - } - - #[test] - fn oneshot_single_thread_recv_chan_close() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert_eq!(rx.recv(), Err(RecvError)); - } - - #[test] - fn oneshot_single_thread_send_then_recv() { - let (tx, rx) = sync_channel::>(1); - tx.send(Box::new(10)).unwrap(); - assert!(*rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_open() { - let (tx, rx) = sync_channel::(1); - assert_eq!(tx.try_send(10), Ok(())); - assert!(rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_closed() { - let (tx, rx) = sync_channel::(0); - drop(rx); - assert_eq!(tx.try_send(10), Err(TrySendError::Disconnected(10))); - } - - #[test] - fn oneshot_single_thread_try_send_closed2() { - let (tx, _rx) = sync_channel::(0); - assert_eq!(tx.try_send(10), Err(TrySendError::Full(10))); - } - - #[test] - fn oneshot_single_thread_try_recv_open() { - let (tx, rx) = sync_channel::(1); - tx.send(10).unwrap(); - assert!(rx.recv() == Ok(10)); - } - - #[test] - fn oneshot_single_thread_try_recv_closed() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn oneshot_single_thread_try_recv_closed_with_data() { - let (tx, rx) = sync_channel::(1); - tx.send(10).unwrap(); - drop(tx); - assert_eq!(rx.try_recv(), Ok(10)); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - } - - #[test] - fn oneshot_single_thread_peek_data() { - let (tx, rx) = sync_channel::(1); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - tx.send(10).unwrap(); - assert_eq!(rx.try_recv(), Ok(10)); - } - - #[test] - fn oneshot_single_thread_peek_close() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - } - - #[test] - fn oneshot_single_thread_peek_open() { - let (_tx, rx) = sync_channel::(0); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[test] - fn oneshot_multi_task_recv_then_send() { - let (tx, rx) = sync_channel::>(0); - let t = thread::spawn(move || { - 
assert!(*rx.recv().unwrap() == 10); - }); - - tx.send(Box::new(10)).unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_task_recv_then_close() { - let (tx, rx) = sync_channel::>(0); - let t = thread::spawn(move || { - drop(tx); - }); - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_thread_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - drop(tx); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - thread::spawn(move || { - let _ = tx.send(1); - }) - .join() - .unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_recv_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - }); - ts.push(t); - let t2 = thread::spawn(move || { - thread::spawn(move || { - drop(tx); - }); - }); - ts.push(t2); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::>(0); - let t = thread::spawn(move || { - tx.send(Box::new(10)).unwrap(); - }); - ts.push(t); - assert!(*rx.recv().unwrap() == 10); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn stream_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::>(0); - - if let Some(t) = send(tx, 0) { - ts.push(t); - } - if let Some(t) = recv(rx, 0) { - ts.push(t); - } - - fn send(tx: SyncSender>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - tx.send(Box::new(i)).unwrap(); - send(tx, i + 1); - })) - } - - fn recv(rx: Receiver>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - assert!(*rx.recv().unwrap() == i); - recv(rx, i + 1); - })) - } - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn recv_a_lot() { - // Regression test that we don't run out of stack in scheduler context - let (tx, rx) = sync_channel(10000); - for _ in 0..10000 { - tx.send(()).unwrap(); - } - for _ in 0..10000 { - rx.recv().unwrap(); - } - } - - #[test] - fn shared_chan_stress() { - let (tx, rx) = sync_channel(0); - let total = stress_factor() + 100; - let mut ts = Vec::with_capacity(total); - for _ in 0..total { - let tx = tx.clone(); - let t = thread::spawn(move || { - tx.send(()).unwrap(); - }); - ts.push(t); - } - - for _ in 0..total { - rx.recv().unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn test_nested_recv_iter() { - let (tx, rx) = sync_channel::(0); - let (total_tx, total_rx) = sync_channel::(0); - - let t = thread::spawn(move || { - let mut acc = 0; - for x in 
rx.iter() { - acc += x; - } - total_tx.send(acc).unwrap(); - }); - - tx.send(3).unwrap(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - drop(tx); - assert_eq!(total_rx.recv().unwrap(), 6); - t.join().unwrap(); - } - - #[test] - fn test_recv_iter_break() { - let (tx, rx) = sync_channel::(0); - let (count_tx, count_rx) = sync_channel(0); - - let t = thread::spawn(move || { - let mut count = 0; - for x in rx.iter() { - if count >= 3 { - break; - } else { - count += x; - } - } - count_tx.send(count).unwrap(); - }); - - tx.send(2).unwrap(); - tx.send(2).unwrap(); - tx.send(2).unwrap(); - let _ = tx.try_send(2); - drop(tx); - assert_eq!(count_rx.recv().unwrap(), 4); - t.join().unwrap(); - } - - #[test] - fn try_recv_states() { - let (tx1, rx1) = sync_channel::(1); - let (tx2, rx2) = sync_channel::<()>(1); - let (tx3, rx3) = sync_channel::<()>(1); - let t = thread::spawn(move || { - rx2.recv().unwrap(); - tx1.send(1).unwrap(); - tx3.send(()).unwrap(); - rx2.recv().unwrap(); - drop(tx1); - tx3.send(()).unwrap(); - }); - - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Ok(1)); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected)); - t.join().unwrap(); - } - - // This bug used to end up in a livelock inside of the Receiver destructor - // because the internal state of the Shared packet was corrupted - #[test] - fn destroy_upgraded_shared_port_when_sender_still_active() { - let (tx, rx) = sync_channel::<()>(0); - let (tx2, rx2) = sync_channel::<()>(0); - let t = thread::spawn(move || { - rx.recv().unwrap(); // wait on a oneshot - drop(rx); // destroy a shared - tx2.send(()).unwrap(); - }); - // make sure the other thread has gone to sleep - for _ in 0..5000 { - thread::yield_now(); - } - - // upgrade to a shared chan and send a message - let tx2 = tx.clone(); - drop(tx); - tx2.send(()).unwrap(); - - // wait for the child thread to exit before we exit - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn send1() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - assert_eq!(tx.send(1), Ok(())); - t.join().unwrap(); - } - - #[test] - fn send2() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - drop(rx); - }); - assert!(tx.send(1).is_err()); - t.join().unwrap(); - } - - #[test] - fn send3() { - let (tx, rx) = sync_channel::(1); - assert_eq!(tx.send(1), Ok(())); - let t = thread::spawn(move || { - drop(rx); - }); - assert!(tx.send(1).is_err()); - t.join().unwrap(); - } - - #[test] - fn send4() { - let (tx, rx) = sync_channel::(0); - let tx2 = tx.clone(); - let (done, donerx) = channel(); - let done2 = done.clone(); - let t = thread::spawn(move || { - assert!(tx.send(1).is_err()); - done.send(()).unwrap(); - }); - let t2 = thread::spawn(move || { - assert!(tx2.send(2).is_err()); - done2.send(()).unwrap(); - }); - drop(rx); - donerx.recv().unwrap(); - donerx.recv().unwrap(); - t.join().unwrap(); - t2.join().unwrap(); - } - - #[test] - fn try_send1() { - let (tx, _rx) = sync_channel::(0); - assert_eq!(tx.try_send(1), Err(TrySendError::Full(1))); - } - - #[test] - fn try_send2() { - let (tx, _rx) = sync_channel::(1); - assert_eq!(tx.try_send(1), Ok(())); - assert_eq!(tx.try_send(1), Err(TrySendError::Full(1))); - } - - #[test] - fn try_send3() { - let (tx, rx) = sync_channel::(1); - assert_eq!(tx.try_send(1), Ok(())); - drop(rx); 
- assert_eq!(tx.try_send(1), Err(TrySendError::Disconnected(1))); - } - - #[test] - fn issue_15761() { - fn repro() { - let (tx1, rx1) = sync_channel::<()>(3); - let (tx2, rx2) = sync_channel::<()>(3); - - let _t = thread::spawn(move || { - rx1.recv().unwrap(); - tx2.try_send(()).unwrap(); - }); - - tx1.try_send(()).unwrap(); - rx2.recv().unwrap(); - } - - for _ in 0..100 { - repro() - } - } -} - -// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/select.rs -mod select_tests { - use super::*; - - use std::thread; - - #[test] - fn smoke() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - tx1.send(1).unwrap(); - select! { - foo = rx1.recv() => { assert_eq!(foo.unwrap(), 1); }, - _bar = rx2.recv() => { panic!() } - } - tx2.send(2).unwrap(); - select! { - _foo = rx1.recv() => { panic!() }, - bar = rx2.recv() => { assert_eq!(bar.unwrap(), 2) } - } - drop(tx1); - select! { - foo = rx1.recv() => { assert!(foo.is_err()); }, - _bar = rx2.recv() => { panic!() } - } - drop(tx2); - select! { - bar = rx2.recv() => { assert!(bar.is_err()); } - } - } - - #[test] - fn smoke2() { - let (_tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (_tx3, rx3) = channel::(); - let (_tx4, rx4) = channel::(); - let (tx5, rx5) = channel::(); - tx5.send(4).unwrap(); - select! { - _foo = rx1.recv() => { panic!("1") }, - _foo = rx2.recv() => { panic!("2") }, - _foo = rx3.recv() => { panic!("3") }, - _foo = rx4.recv() => { panic!("4") }, - foo = rx5.recv() => { assert_eq!(foo.unwrap(), 4); } - } - } - - #[test] - fn closed() { - let (_tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - drop(tx2); - - select! { - _a1 = rx1.recv() => { panic!() }, - a2 = rx2.recv() => { assert!(a2.is_err()); } - } - } - - #[test] - fn unblocks() { - let (tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (tx3, rx3) = channel::(); - - let t = thread::spawn(move || { - for _ in 0..20 { - thread::yield_now(); - } - tx1.send(1).unwrap(); - rx3.recv().unwrap(); - for _ in 0..20 { - thread::yield_now(); - } - }); - - select! { - a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, - _b = rx2.recv() => { panic!() } - } - tx3.send(1).unwrap(); - select! { - a = rx1.recv() => { assert!(a.is_err()) }, - _b = rx2.recv() => { panic!() } - } - t.join().unwrap(); - } - - #[test] - fn both_ready() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - for _ in 0..20 { - thread::yield_now(); - } - tx1.send(1).unwrap(); - tx2.send(2).unwrap(); - rx3.recv().unwrap(); - }); - - select! { - a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, - a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } - } - select! { - a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, - a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } - } - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(rx2.try_recv(), Err(TryRecvError::Empty)); - tx3.send(()).unwrap(); - t.join().unwrap(); - } - - #[test] - fn stress() { - const AMT: i32 = 10000; - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - for i in 0..AMT { - if i % 2 == 0 { - tx1.send(i).unwrap(); - } else { - tx2.send(i).unwrap(); - } - rx3.recv().unwrap(); - } - }); - - for i in 0..AMT { - select! 
{ - i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1.unwrap()); }, - i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2.unwrap()); } - } - tx3.send(()).unwrap(); - } - t.join().unwrap(); - } - - #[allow(unused_must_use)] - #[test] - fn cloning() { - let (tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - rx3.recv().unwrap(); - tx1.clone(); - assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); - tx1.send(2).unwrap(); - rx3.recv().unwrap(); - }); - - tx3.send(()).unwrap(); - select! { - _i1 = rx1.recv() => {}, - _i2 = rx2.recv() => panic!() - } - tx3.send(()).unwrap(); - t.join().unwrap(); - } - - #[allow(unused_must_use)] - #[test] - fn cloning2() { - let (tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - rx3.recv().unwrap(); - tx1.clone(); - assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); - tx1.send(2).unwrap(); - rx3.recv().unwrap(); - }); - - tx3.send(()).unwrap(); - select! { - _i1 = rx1.recv() => {}, - _i2 = rx2.recv() => panic!() - } - tx3.send(()).unwrap(); - t.join().unwrap(); - } - - #[test] - fn cloning3() { - let (tx1, rx1) = channel::<()>(); - let (tx2, rx2) = channel::<()>(); - let (tx3, rx3) = channel::<()>(); - let t = thread::spawn(move || { - select! { - _ = rx1.recv() => panic!(), - _ = rx2.recv() => {} - } - tx3.send(()).unwrap(); - }); - - for _ in 0..1000 { - thread::yield_now(); - } - drop(tx1.clone()); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn preflight1() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - select! { - _n = rx.recv() => {} - } - } - - #[test] - fn preflight2() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - tx.send(()).unwrap(); - select! { - _n = rx.recv() => {} - } - } - - #[test] - fn preflight3() { - let (tx, rx) = channel(); - drop(tx.clone()); - tx.send(()).unwrap(); - select! { - _n = rx.recv() => {} - } - } - - #[test] - fn preflight4() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight5() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - tx.send(()).unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight6() { - let (tx, rx) = channel(); - drop(tx.clone()); - tx.send(()).unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight7() { - let (tx, rx) = channel::<()>(); - drop(tx); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight8() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - drop(tx); - rx.recv().unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight9() { - let (tx, rx) = channel(); - drop(tx.clone()); - tx.send(()).unwrap(); - drop(tx); - rx.recv().unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn oneshot_data_waiting() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - let t = thread::spawn(move || { - select! { - _n = rx1.recv() => {} - } - tx2.send(()).unwrap(); - }); - - for _ in 0..100 { - thread::yield_now() - } - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn stream_data_waiting() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - tx1.send(()).unwrap(); - tx1.send(()).unwrap(); - rx1.recv().unwrap(); - rx1.recv().unwrap(); - let t = thread::spawn(move || { - select! 
{ - _n = rx1.recv() => {} - } - tx2.send(()).unwrap(); - }); - - for _ in 0..100 { - thread::yield_now() - } - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn shared_data_waiting() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - drop(tx1.clone()); - tx1.send(()).unwrap(); - rx1.recv().unwrap(); - let t = thread::spawn(move || { - select! { - _n = rx1.recv() => {} - } - tx2.send(()).unwrap(); - }); - - for _ in 0..100 { - thread::yield_now() - } - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn sync1() { - let (tx, rx) = sync_channel::(1); - tx.send(1).unwrap(); - select! { - n = rx.recv() => { assert_eq!(n.unwrap(), 1); } - } - } - - #[test] - fn sync2() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - for _ in 0..100 { - thread::yield_now() - } - tx.send(1).unwrap(); - }); - select! { - n = rx.recv() => { assert_eq!(n.unwrap(), 1); } - } - t.join().unwrap(); - } - - #[test] - fn sync3() { - let (tx1, rx1) = sync_channel::(0); - let (tx2, rx2): (Sender, Receiver) = channel(); - let t = thread::spawn(move || { - tx1.send(1).unwrap(); - }); - let t2 = thread::spawn(move || { - tx2.send(2).unwrap(); - }); - select! { - n = rx1.recv() => { - let n = n.unwrap(); - assert_eq!(n, 1); - assert_eq!(rx2.recv().unwrap(), 2); - }, - n = rx2.recv() => { - let n = n.unwrap(); - assert_eq!(n, 2); - assert_eq!(rx1.recv().unwrap(), 1); - } - } - t.join().unwrap(); - t2.join().unwrap(); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/never.rs cargo-0.47.0/vendor/crossbeam-channel/tests/never.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/never.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/never.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -//! Tests for the never channel flavor. - -#[macro_use] -extern crate crossbeam_channel; -extern crate rand; - -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{never, tick, unbounded}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - select! { - recv(never::()) -> _ => panic!(), - default => {} - } -} - -#[test] -fn optional() { - let (s, r) = unbounded::(); - s.send(1).unwrap(); - s.send(2).unwrap(); - - let mut r = Some(&r); - select! { - recv(r.unwrap_or(&never())) -> _ => {} - default => panic!(), - } - - r = None; - select! { - recv(r.unwrap_or(&never())) -> _ => panic!(), - default => {} - } -} - -#[test] -fn tick_n() { - let mut r = tick(ms(100)); - let mut step = 0; - - loop { - select! 
{ - recv(r) -> _ => step += 1, - default(ms(500)) => break, - } - - if step == 10 { - r = never(); - } - } - - assert_eq!(step, 10); -} - -#[test] -fn capacity() { - let r = never::(); - assert_eq!(r.capacity(), Some(0)); -} - -#[test] -fn len_empty_full() { - let r = never::(); - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), true); -} - -#[test] -fn try_recv() { - let r = never::(); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(100)); - assert!(r.try_recv().is_err()); -} - -#[test] -fn recv_timeout() { - let start = Instant::now(); - let r = never::(); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(100)); - assert!(now - start <= ms(150)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(200)); - assert!(now - start <= ms(250)); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/ready.rs cargo-0.47.0/vendor/crossbeam-channel/tests/ready.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/ready.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/ready.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,837 +0,0 @@ -//! Tests for channel readiness using the `Select` struct. - -extern crate crossbeam_channel; -extern crate crossbeam_utils; - -use std::any::Any; -use std::cell::Cell; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, bounded, tick, unbounded}; -use crossbeam_channel::{Receiver, Select, TryRecvError, TrySendError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke1() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert_eq!(sel.ready(), 0); - assert_eq!(r1.try_recv(), Ok(1)); - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert_eq!(sel.ready(), 1); - assert_eq!(r2.try_recv(), Ok(2)); -} - -#[test] -fn smoke2() { - let (_s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (_s3, r3) = unbounded::(); - let (_s4, r4) = unbounded::(); - let (s5, r5) = unbounded::(); - - s5.send(5).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&r3); - sel.recv(&r4); - sel.recv(&r5); - assert_eq!(sel.ready(), 4); - assert_eq!(r5.try_recv(), Ok(5)); -} - -#[test] -fn disconnected() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - drop(s1); - thread::sleep(ms(500)); - s2.send(5).unwrap(); - }); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - r2.recv().unwrap(); - }) - .unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - drop(s2); - }); - - let mut sel = Select::new(); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r2.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn default() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - let mut sel = Select::new(); - sel.recv(&r1); 
- sel.recv(&r2); - assert!(sel.try_ready().is_err()); - - drop(s1); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.try_ready() { - Ok(0) => assert!(r1.try_recv().is_err()), - _ => panic!(), - } - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r2); - match sel.try_ready() { - Ok(0) => assert_eq!(r2.try_recv(), Ok(2)), - _ => panic!(), - } - - let mut sel = Select::new(); - sel.recv(&r2); - assert!(sel.try_ready().is_err()); - - let mut sel = Select::new(); - assert!(sel.try_ready().is_err()); -} - -#[test] -fn timeout() { - let (_s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(1500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert!(sel.ready_timeout(ms(1000)).is_err()); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(1) => assert_eq!(r2.try_recv(), Ok(2)), - _ => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - let (s, r) = unbounded::(); - - scope.spawn(move |_| { - thread::sleep(ms(500)); - drop(s); - }); - - let mut sel = Select::new(); - assert!(sel.ready_timeout(ms(1000)).is_err()); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.try_ready() { - Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn default_when_disconnected() { - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.try_ready() { - Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - sel.send(&s); - match sel.try_ready() { - Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))), - _ => panic!(), - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - sel.send(&s); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))), - _ => panic!(), - } -} - -#[test] -fn default_only() { - let start = Instant::now(); - - let mut sel = Select::new(); - assert!(sel.try_ready().is_err()); - let now = Instant::now(); - assert!(now - start <= ms(50)); - - let start = Instant::now(); - let mut sel = Select::new(); - assert!(sel.ready_timeout(ms(500)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(450)); - assert!(now - start <= ms(550)); -} - -#[test] -fn unblocks() { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(1) => assert_eq!(r2.try_recv(), Ok(2)), - _ => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - assert_eq!(r1.recv().unwrap(), 1); - }); - - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper2 = sel.send(&s2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => oper.send(&s1, 1).unwrap(), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - }) - .unwrap(); -} - -#[test] -fn 
both_ready() { - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s1.send(1).unwrap(); - assert_eq!(r2.recv().unwrap(), 2); - }); - - for _ in 0..2 { - let mut sel = Select::new(); - sel.recv(&r1); - sel.send(&s2); - match sel.ready() { - 0 => assert_eq!(r1.try_recv(), Ok(1)), - 1 => s2.try_send(2).unwrap(), - _ => panic!(), - } - } - }) - .unwrap(); -} - -#[test] -fn cloning1() { - scope(|scope| { - let (s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (s3, r3) = unbounded::<()>(); - - scope.spawn(move |_| { - r3.recv().unwrap(); - drop(s1.clone()); - assert!(r3.try_recv().is_err()); - s1.send(1).unwrap(); - r3.recv().unwrap(); - }); - - s3.send(()).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready() { - 0 => drop(r1.try_recv()), - 1 => drop(r2.try_recv()), - _ => panic!(), - } - - s3.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn cloning2() { - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (_s3, _r3) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready() { - 0 => panic!(), - 1 => drop(r2.try_recv()), - _ => panic!(), - } - }); - - thread::sleep(ms(500)); - drop(s1.clone()); - s2.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn preflight1() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => drop(r.try_recv()), - _ => panic!(), - } -} - -#[test] -fn preflight2() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => assert_eq!(r.try_recv(), Ok(())), - _ => panic!(), - } - - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); -} - -#[test] -fn preflight3() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - r.recv().unwrap(); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } -} - -#[test] -fn duplicate_operations() { - let (s, r) = unbounded::(); - let hit = vec![Cell::new(false); 4]; - - while hit.iter().map(|h| h.get()).any(|hit| !hit) { - let mut sel = Select::new(); - sel.recv(&r); - sel.recv(&r); - sel.send(&s); - sel.send(&s); - match sel.ready() { - 0 => { - assert!(r.try_recv().is_ok()); - hit[0].set(true); - } - 1 => { - assert!(r.try_recv().is_ok()); - hit[1].set(true); - } - 2 => { - assert!(s.try_send(0).is_ok()); - hit[2].set(true); - } - 3 => { - assert!(s.try_send(0).is_ok()); - hit[3].set(true); - } - _ => panic!(), - } - } -} - -#[test] -fn nesting() { - let (s, r) = unbounded::(); - - let mut sel = Select::new(); - sel.send(&s); - match sel.ready() { - 0 => { - assert!(s.try_send(0).is_ok()); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => { - assert_eq!(r.try_recv(), Ok(0)); - - let mut sel = Select::new(); - sel.send(&s); - match sel.ready() { - 0 => { - assert!(s.try_send(1).is_ok()); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => { - assert_eq!(r.try_recv(), Ok(1)); - } - _ => panic!(), - } - } - _ => panic!(), - } - } - _ => panic!(), - } - } - _ => panic!(), - } -} - -#[test] -fn stress_recv() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded(); - let (s2, r2) = bounded(5); - let (s3, r3) = 
bounded(0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - r3.recv().unwrap(); - - s2.send(i).unwrap(); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready() { - 0 => assert_eq!(r1.try_recv(), Ok(i)), - 1 => assert_eq!(r2.try_recv(), Ok(i)), - _ => panic!(), - } - - s3.send(()).unwrap(); - } - } - }) - .unwrap(); -} - -#[test] -fn stress_send() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r1.recv().unwrap(), i); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - sel.send(&s1); - sel.send(&s2); - match sel.ready() { - 0 => assert!(s1.try_send(i).is_ok()), - 1 => assert!(s2.try_send(i).is_ok()), - _ => panic!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_mixed() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - sel.recv(&r1); - sel.send(&s2); - match sel.ready() { - 0 => assert_eq!(r1.try_recv(), Ok(i)), - 1 => assert!(s2.try_send(i).is_ok()), - _ => panic!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 20; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - let done = false; - while !done { - let mut sel = Select::new(); - sel.send(&s); - match sel.ready_timeout(ms(100)) { - Err(_) => {} - Ok(0) => { - assert!(s.try_send(i).is_ok()); - break; - } - Ok(_) => panic!(), - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - let mut done = false; - while !done { - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready_timeout(ms(100)) { - Err(_) => {} - Ok(0) => { - assert_eq!(r.try_recv(), Ok(i)); - done = true; - } - Ok(_) => panic!(), - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn send_recv_same_channel() { - let (s, r) = bounded::(0); - let mut sel = Select::new(); - sel.send(&s); - sel.recv(&r); - assert!(sel.ready_timeout(ms(100)).is_err()); - - let (s, r) = unbounded::(); - let mut sel = Select::new(); - sel.send(&s); - sel.recv(&r); - match sel.ready_timeout(ms(100)) { - Err(_) => panic!(), - Ok(0) => assert!(s.try_send(0).is_ok()), - Ok(_) => panic!(), - } -} - -#[test] -fn channel_through_channel() { - const COUNT: usize = 1000; - - type T = Box; - - for cap in 1..4 { - let (s, r) = bounded::(cap); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(cap); - let new_r: T = Box::new(Some(new_r)); - - { - let mut sel = Select::new(); - sel.send(&s); - match sel.ready() { - 0 => assert!(s.try_send(new_r).is_ok()), - _ => panic!(), - } - } - - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - let new = { - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => r - .try_recv() - .unwrap() - 
.downcast_mut::>>() - .unwrap() - .take() - .unwrap(), - _ => panic!(), - } - }; - r = new; - } - }); - }) - .unwrap(); - } -} - -#[test] -fn fairness1() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let hits = vec![Cell::new(0usize); 4]; - for _ in 0..COUNT { - let after = after(ms(0)); - let tick = tick(ms(0)); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&after); - sel.recv(&tick); - match sel.ready() { - 0 => { - r1.try_recv().unwrap(); - hits[0].set(hits[0].get() + 1); - } - 1 => { - r2.try_recv().unwrap(); - hits[1].set(hits[1].get() + 1); - } - 2 => { - after.try_recv().unwrap(); - hits[2].set(hits[2].get() + 1); - } - 3 => { - tick.try_recv().unwrap(); - hits[3].set(hits[3].get() + 1); - } - _ => panic!(), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness2() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = bounded::<()>(1); - let (s3, r3) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - let mut sel = Select::new(); - let mut oper1 = None; - let mut oper2 = None; - if s1.is_empty() { - oper1 = Some(sel.send(&s1)); - } - if s2.is_empty() { - oper2 = Some(sel.send(&s2)); - } - let oper3 = sel.send(&s3); - let oper = sel.select(); - match oper.index() { - i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()), - i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()), - i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()), - _ => unreachable!(), - } - } - }); - - let hits = vec![Cell::new(0usize); 3]; - for _ in 0..COUNT { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&r3); - loop { - match sel.ready() { - 0 => { - if r1.try_recv().is_ok() { - hits[0].set(hits[0].get() + 1); - break; - } - } - 1 => { - if r2.try_recv().is_ok() { - hits[1].set(hits[1].get() + 1); - break; - } - } - 2 => { - if r3.try_recv().is_ok() { - hits[2].set(hits[2].get() + 1); - break; - } - } - _ => unreachable!(), - } - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 10)); - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/same_channel.rs cargo-0.47.0/vendor/crossbeam-channel/tests/same_channel.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/same_channel.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/same_channel.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -extern crate crossbeam_channel; - -use std::time::Duration; - -use crossbeam_channel::{after, bounded, never, tick, unbounded}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn after_same_channel() { - let r = after(ms(50)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let r3 = after(ms(50)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); - - let r4 = after(ms(100)); - assert!(!r.same_channel(&r4)); - assert!(!r2.same_channel(&r4)); -} - -#[test] -fn array_same_channel() { - let (s, r) = bounded::(1); - - let s2 = s.clone(); - assert!(s.same_channel(&s2)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let (s3, r3) = bounded::(1); - assert!(!s.same_channel(&s3)); - assert!(!s2.same_channel(&s3)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); -} - -#[test] -fn list_same_channel() { - let (s, r) = unbounded::(); - - let s2 = 
s.clone(); - assert!(s.same_channel(&s2)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let (s3, r3) = unbounded::(); - assert!(!s.same_channel(&s3)); - assert!(!s2.same_channel(&s3)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); -} - -#[test] -fn never_same_channel() { - let r = never::(); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - // Never channel are always equal to one another. - let r3 = never::(); - assert!(r.same_channel(&r3)); - assert!(r2.same_channel(&r3)); -} - -#[test] -fn tick_same_channel() { - let r = tick(ms(50)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let r3 = tick(ms(50)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); - - let r4 = tick(ms(100)); - assert!(!r.same_channel(&r4)); - assert!(!r2.same_channel(&r4)); -} - -#[test] -fn zero_same_channel() { - let (s, r) = bounded::(0); - - let s2 = s.clone(); - assert!(s.same_channel(&s2)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let (s3, r3) = bounded::(0); - assert!(!s.same_channel(&s3)); - assert!(!s2.same_channel(&s3)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); -} - -#[test] -fn different_flavors_same_channel() { - let (s1, r1) = bounded::(0); - let (s2, r2) = unbounded::(); - - assert!(!s1.same_channel(&s2)); - assert!(!r1.same_channel(&r2)); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/select_macro.rs cargo-0.47.0/vendor/crossbeam-channel/tests/select_macro.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/select_macro.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/select_macro.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1440 +0,0 @@ -//! Tests for the `select!` macro. - -#![deny(unsafe_code)] - -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; - -use std::any::Any; -use std::cell::Cell; -use std::ops::Deref; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, bounded, never, tick, unbounded}; -use crossbeam_channel::{Receiver, RecvError, SendError, Sender, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke1() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - s1.send(1).unwrap(); - - select! { - recv(r1) -> v => assert_eq!(v, Ok(1)), - recv(r2) -> _ => panic!(), - } - - s2.send(2).unwrap(); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> v => assert_eq!(v, Ok(2)), - } -} - -#[test] -fn smoke2() { - let (_s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (_s3, r3) = unbounded::(); - let (_s4, r4) = unbounded::(); - let (s5, r5) = unbounded::(); - - s5.send(5).unwrap(); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => panic!(), - recv(r3) -> _ => panic!(), - recv(r4) -> _ => panic!(), - recv(r5) -> v => assert_eq!(v, Ok(5)), - } -} - -#[test] -fn disconnected() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - drop(s1); - thread::sleep(ms(500)); - s2.send(5).unwrap(); - }); - - select! { - recv(r1) -> v => assert!(v.is_err()), - recv(r2) -> _ => panic!(), - default(ms(1000)) => panic!(), - } - - r2.recv().unwrap(); - }) - .unwrap(); - - select! { - recv(r1) -> v => assert!(v.is_err()), - recv(r2) -> _ => panic!(), - default(ms(1000)) => panic!(), - } - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - drop(s2); - }); - - select! 
{ - recv(r2) -> v => assert!(v.is_err()), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn default() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => panic!(), - default => {} - } - - drop(s1); - - select! { - recv(r1) -> v => assert!(v.is_err()), - recv(r2) -> _ => panic!(), - default => panic!(), - } - - s2.send(2).unwrap(); - - select! { - recv(r2) -> v => assert_eq!(v, Ok(2)), - default => panic!(), - } - - select! { - recv(r2) -> _ => panic!(), - default => {}, - } - - select! { - default => {}, - } -} - -#[test] -fn timeout() { - let (_s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(1500)); - s2.send(2).unwrap(); - }); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => panic!(), - default(ms(1000)) => {}, - } - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> v => assert_eq!(v, Ok(2)), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - let (s, r) = unbounded::(); - - scope.spawn(move |_| { - thread::sleep(ms(500)); - drop(s); - }); - - select! { - default(ms(1000)) => { - select! { - recv(r) -> v => assert!(v.is_err()), - default => panic!(), - } - } - } - }) - .unwrap(); -} - -#[test] -fn default_when_disconnected() { - let (_, r) = unbounded::(); - - select! { - recv(r) -> res => assert!(res.is_err()), - default => panic!(), - } - - let (_, r) = unbounded::(); - - select! { - recv(r) -> res => assert!(res.is_err()), - default(ms(1000)) => panic!(), - } - - let (s, _) = bounded::(0); - - select! { - send(s, 0) -> res => assert!(res.is_err()), - default => panic!(), - } - - let (s, _) = bounded::(0); - - select! { - send(s, 0) -> res => assert!(res.is_err()), - default(ms(1000)) => panic!(), - } -} - -#[test] -fn default_only() { - let start = Instant::now(); - select! { - default => {} - } - let now = Instant::now(); - assert!(now - start <= ms(50)); - - let start = Instant::now(); - select! { - default(ms(500)) => {} - } - let now = Instant::now(); - assert!(now - start >= ms(450)); - assert!(now - start <= ms(550)); -} - -#[test] -fn unblocks() { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s2.send(2).unwrap(); - }); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> v => assert_eq!(v, Ok(2)), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - assert_eq!(r1.recv().unwrap(), 1); - }); - - select! { - send(s1, 1) -> _ => {}, - send(s2, 2) -> _ => panic!(), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn both_ready() { - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s1.send(1).unwrap(); - assert_eq!(r2.recv().unwrap(), 2); - }); - - for _ in 0..2 { - select! { - recv(r1) -> v => assert_eq!(v, Ok(1)), - send(s2, 2) -> _ => {}, - } - } - }) - .unwrap(); -} - -#[test] -fn loop_try() { - const RUNS: usize = 20; - - for _ in 0..RUNS { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - let (s_end, r_end) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| loop { - select! { - send(s1, 1) -> _ => break, - default => {} - } - - select! { - recv(r_end) -> _ => break, - default => {} - } - }); - - scope.spawn(|_| loop { - if let Ok(x) = r2.try_recv() { - assert_eq!(x, 2); - break; - } - - select! 
{ - recv(r_end) -> _ => break, - default => {} - } - }); - - scope.spawn(|_| { - thread::sleep(ms(500)); - - select! { - recv(r1) -> v => assert_eq!(v, Ok(1)), - send(s2, 2) -> _ => {}, - default(ms(500)) => panic!(), - } - - drop(s_end); - }); - }) - .unwrap(); - } -} - -#[test] -fn cloning1() { - scope(|scope| { - let (s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (s3, r3) = unbounded::<()>(); - - scope.spawn(move |_| { - r3.recv().unwrap(); - drop(s1.clone()); - assert_eq!(r3.try_recv(), Err(TryRecvError::Empty)); - s1.send(1).unwrap(); - r3.recv().unwrap(); - }); - - s3.send(()).unwrap(); - - select! { - recv(r1) -> _ => {}, - recv(r2) -> _ => {}, - } - - s3.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn cloning2() { - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (_s3, _r3) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => {}, - } - }); - - thread::sleep(ms(500)); - drop(s1.clone()); - s2.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn preflight1() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - select! { - recv(r) -> _ => {} - } -} - -#[test] -fn preflight2() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - - select! { - recv(r) -> v => assert!(v.is_ok()), - } - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); -} - -#[test] -fn preflight3() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - r.recv().unwrap(); - - select! { - recv(r) -> v => assert!(v.is_err()) - } -} - -#[test] -fn duplicate_operations() { - let (s, r) = unbounded::(); - let mut hit = [false; 4]; - - while hit.iter().any(|hit| !hit) { - select! { - recv(r) -> _ => hit[0] = true, - recv(r) -> _ => hit[1] = true, - send(s, 0) -> _ => hit[2] = true, - send(s, 0) -> _ => hit[3] = true, - } - } -} - -#[test] -fn nesting() { - let (s, r) = unbounded::(); - - select! { - send(s, 0) -> _ => { - select! { - recv(r) -> v => { - assert_eq!(v, Ok(0)); - select! { - send(s, 1) -> _ => { - select! { - recv(r) -> v => { - assert_eq!(v, Ok(1)); - } - } - } - } - } - } - } - } -} - -#[test] -#[should_panic(expected = "send panicked")] -fn panic_sender() { - fn get() -> Sender { - panic!("send panicked") - } - - #[allow(unreachable_code)] - { - select! { - send(get(), panic!()) -> _ => {} - } - } -} - -#[test] -#[should_panic(expected = "recv panicked")] -fn panic_receiver() { - fn get() -> Receiver { - panic!("recv panicked") - } - - select! { - recv(get()) -> _ => {} - } -} - -#[test] -fn stress_recv() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded(); - let (s2, r2) = bounded(5); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - r3.recv().unwrap(); - - s2.send(i).unwrap(); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - select! { - recv(r1) -> v => assert_eq!(v, Ok(i)), - recv(r2) -> v => assert_eq!(v, Ok(i)), - } - - s3.send(()).unwrap(); - } - } - }) - .unwrap(); -} - -#[test] -fn stress_send() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r1.recv().unwrap(), i); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - select! 
{ - send(s1, i) -> _ => {}, - send(s2, i) -> _ => {}, - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_mixed() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - select! { - recv(r1) -> v => assert_eq!(v, Ok(i)), - send(s2, i) -> _ => {}, - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 20; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - select! { - send(s, i) -> _ => break, - default(ms(100)) => {} - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - select! { - recv(r) -> v => { - assert_eq!(v, Ok(i)); - break; - } - default(ms(100)) => {} - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn send_recv_same_channel() { - let (s, r) = bounded::(0); - select! { - send(s, 0) -> _ => panic!(), - recv(r) -> _ => panic!(), - default(ms(500)) => {} - } - - let (s, r) = unbounded::(); - select! { - send(s, 0) -> _ => {}, - recv(r) -> _ => panic!(), - default(ms(500)) => panic!(), - } -} - -#[test] -fn matching() { - const THREADS: usize = 44; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - select! { - recv(r) -> v => assert_ne!(v.unwrap(), i), - send(s, i) -> _ => {}, - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn matching_with_leftover() { - const THREADS: usize = 55; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - select! { - recv(r) -> v => assert_ne!(v.unwrap(), i), - send(s, i) -> _ => {}, - } - }); - } - s.send(!0).unwrap(); - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn channel_through_channel() { - const COUNT: usize = 1000; - - type T = Box; - - for cap in 0..3 { - let (s, r) = bounded::(cap); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(cap); - let new_r: T = Box::new(Some(new_r)); - - select! { - send(s, new_r) -> _ => {} - } - - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = select! { - recv(r) -> msg => { - msg.unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - } - } - }); - }) - .unwrap(); - } -} - -#[test] -fn linearizable_default() { - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - select! 
{ - recv(r1) -> _ => {} - recv(r2) -> _ => {} - default => unreachable!() - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn linearizable_timeout() { - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - default(ms(0)) => unreachable!() - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn fairness1() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 4]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(after(ms(0))) -> _ => hits[2] += 1, - recv(tick(ms(0))) -> _ => hits[3] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness2() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = bounded::<()>(1); - let (s3, r3) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - let (hole, _r) = bounded(0); - - for _ in 0..COUNT { - let s1 = if s1.is_empty() { &s1 } else { &hole }; - let s2 = if s2.is_empty() { &s2 } else { &hole }; - - select! { - send(s1, ()) -> res => assert!(res.is_ok()), - send(s2, ()) -> res => assert!(res.is_ok()), - send(s3, ()) -> res => assert!(res.is_ok()), - } - } - }); - - let hits = vec![Cell::new(0usize); 3]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0].set(hits[0].get() + 1), - recv(r2) -> _ => hits[1].set(hits[1].get() + 1), - recv(r3) -> _ => hits[2].set(hits[2].get() + 1), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 50)); - }) - .unwrap(); -} - -#[test] -fn fairness_recv() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 2]; - while hits[0] + hits[1] < COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / 4)); -} - -#[test] -fn fairness_send() { - const COUNT: usize = 10_000; - - let (s1, _r1) = bounded::<()>(COUNT); - let (s2, _r2) = unbounded::<()>(); - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - send(s1, ()) -> _ => hits[0] += 1, - send(s2, ()) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / 4)); -} - -#[test] -fn references() { - let (s, r) = unbounded::(); - select! { - send(s, 0) -> _ => {} - recv(r) -> _ => {} - } - select! { - send(&&&&s, 0) -> _ => {} - recv(&&&&r) -> _ => {} - } - select! { - recv(Some(&r).unwrap_or(&never())) -> _ => {}, - default => {} - } - select! 
{ - recv(Some(r).unwrap_or(never())) -> _ => {}, - default => {} - } -} - -#[test] -fn case_blocks() { - let (s, r) = unbounded::(); - - select! { - recv(r) -> _ => 3.0, - recv(r) -> _ => loop { - unreachable!() - }, - recv(r) -> _ => match 7 + 3 { - _ => unreachable!() - }, - default => 7. - }; - - select! { - recv(r) -> msg => if msg.is_ok() { - unreachable!() - }, - default => () - } - - drop(s); -} - -#[test] -fn move_handles() { - let (s, r) = unbounded::(); - select! { - recv((move || r)()) -> _ => {} - send((move || s)(), 0) -> _ => {} - } -} - -#[test] -fn infer_types() { - let (s, r) = unbounded(); - select! { - recv(r) -> _ => {} - default => {} - } - s.send(()).unwrap(); - - let (s, r) = unbounded(); - select! { - send(s, ()) -> _ => {} - } - r.recv().unwrap(); -} - -#[test] -fn default_syntax() { - let (s, r) = bounded::(0); - - select! { - recv(r) -> _ => panic!(), - default => {} - } - select! { - send(s, 0) -> _ => panic!(), - default() => {} - } - select! { - default => {} - } - select! { - default() => {} - } -} - -#[test] -fn same_variable_name() { - let (_, r) = unbounded::(); - select! { - recv(r) -> r => assert!(r.is_err()), - } -} - -#[test] -fn handles_on_heap() { - let (s, r) = unbounded::(); - let (s, r) = (Box::new(s), Box::new(r)); - - select! { - send(*s, 0) -> _ => {} - recv(*r) -> _ => {} - default => {} - } - - drop(s); - drop(r); -} - -#[test] -fn once_blocks() { - let (s, r) = unbounded::(); - - let once = Box::new(()); - select! { - send(s, 0) -> _ => drop(once), - } - - let once = Box::new(()); - select! { - recv(r) -> _ => drop(once), - } - - let once1 = Box::new(()); - let once2 = Box::new(()); - select! { - send(s, 0) -> _ => drop(once1), - default => drop(once2), - } - - let once1 = Box::new(()); - let once2 = Box::new(()); - select! { - recv(r) -> _ => drop(once1), - default => drop(once2), - } - - let once1 = Box::new(()); - let once2 = Box::new(()); - select! { - recv(r) -> _ => drop(once1), - send(s, 0) -> _ => drop(once2), - } -} - -#[test] -fn once_receiver() { - let (_, r) = unbounded::(); - - let once = Box::new(()); - let get = move || { - drop(once); - r - }; - - select! { - recv(get()) -> _ => {} - } -} - -#[test] -fn once_sender() { - let (s, _) = unbounded::(); - - let once = Box::new(()); - let get = move || { - drop(once); - s - }; - - select! { - send(get(), 5) -> _ => {} - } -} - -#[test] -fn parse_nesting() { - let (_, r) = unbounded::(); - - select! { - recv(r) -> _ => {} - recv(r) -> _ => { - select! { - recv(r) -> _ => {} - recv(r) -> _ => { - select! { - recv(r) -> _ => {} - recv(r) -> _ => { - select! { - default => {} - } - } - } - } - } - } - } -} - -#[test] -fn evaluate() { - let (s, r) = unbounded::(); - - let v = select! { - recv(r) -> _ => "foo".into(), - send(s, 0) -> _ => "bar".to_owned(), - default => "baz".to_string(), - }; - assert_eq!(v, "bar"); - - let v = select! { - recv(r) -> _ => "foo".into(), - default => "baz".to_string(), - }; - assert_eq!(v, "foo"); - - let v = select! { - recv(r) -> _ => "foo".into(), - default => "baz".to_string(), - }; - assert_eq!(v, "baz"); -} - -#[test] -fn deref() { - use crossbeam_channel as cc; - - struct Sender(cc::Sender); - struct Receiver(cc::Receiver); - - impl Deref for Receiver { - type Target = cc::Receiver; - - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - impl Deref for Sender { - type Target = cc::Sender; - - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - let (s, r) = bounded::(0); - let (s, r) = (Sender(s), Receiver(r)); - - select! 
{ - send(s, 0) -> _ => panic!(), - recv(r) -> _ => panic!(), - default => {} - } -} - -#[test] -fn result_types() { - let (s, _) = bounded::(0); - let (_, r) = bounded::(0); - - select! { - recv(r) -> res => drop::>(res), - } - select! { - recv(r) -> res => drop::>(res), - default => {} - } - select! { - recv(r) -> res => drop::>(res), - default(ms(0)) => {} - } - - select! { - send(s, 0) -> res => drop::>>(res), - } - select! { - send(s, 0) -> res => drop::>>(res), - default => {} - } - select! { - send(s, 0) -> res => drop::>>(res), - default(ms(0)) => {} - } - - select! { - send(s, 0) -> res => drop::>>(res), - recv(r) -> res => drop::>(res), - } -} - -#[test] -fn try_recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> _ => panic!(), - default => {} - } - thread::sleep(ms(1500)); - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - default => panic!(), - } - thread::sleep(ms(500)); - select! { - recv(r) -> v => assert_eq!(v, Err(RecvError)), - default => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - select! { - send(s, 7) -> res => res.unwrap(), - } - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - } - thread::sleep(ms(1000)); - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - thread::sleep(ms(1000)); - select! { - recv(r) -> v => assert_eq!(v, Ok(9)), - } - select! { - recv(r) -> v => assert_eq!(v, Err(RecvError)), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - send(s, 7) -> res => res.unwrap(), - } - select! { - send(s, 8) -> res => res.unwrap(), - } - select! { - send(s, 9) -> res => res.unwrap(), - } - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> _ => panic!(), - default(ms(1000)) => {} - } - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - default(ms(1000)) => panic!(), - } - select! { - recv(r) -> v => assert_eq!(v, Err(RecvError)), - default(ms(1000)) => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - send(s, 7) -> res => res.unwrap(), - } - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, 7) -> _ => panic!(), - default => {} - } - thread::sleep(ms(1500)); - select! { - send(s, 8) -> res => res.unwrap(), - default => panic!(), - } - thread::sleep(ms(500)); - select! { - send(s, 8) -> res => assert_eq!(res, Err(SendError(8))), - default => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - }); - }) - .unwrap(); -} - -#[test] -fn send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, 7) -> res => res.unwrap(), - } - thread::sleep(ms(1000)); - select! { - send(s, 8) -> res => res.unwrap(), - } - thread::sleep(ms(1000)); - select! { - send(s, 9) -> res => res.unwrap(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - } - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - select! { - recv(r) -> v => assert_eq!(v, Ok(9)), - } - }); - }) - .unwrap(); -} - -#[test] -fn send_timeout() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! 
{ - send(s, 7) -> _ => panic!(), - default(ms(1000)) => {} - } - select! { - send(s, 8) -> res => res.unwrap(), - default(ms(1000)) => panic!(), - } - select! { - send(s, 9) -> res => assert_eq!(res, Err(SendError(9))), - default(ms(1000)) => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_sender() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, ()) -> res => assert_eq!(res, Err(SendError(()))), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(r); - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> res => assert_eq!(res, Err(RecvError)), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/select.rs cargo-0.47.0/vendor/crossbeam-channel/tests/select.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/select.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/select.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1304 +0,0 @@ -//! Tests for channel selection using the `Select` struct. - -extern crate crossbeam_channel; -extern crate crossbeam_utils; - -use std::any::Any; -use std::cell::Cell; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, bounded, tick, unbounded, Receiver, Select, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke1() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), - i if i == oper2 => panic!(), - _ => unreachable!(), - } - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - } -} - -#[test] -fn smoke2() { - let (_s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (_s3, r3) = unbounded::(); - let (_s4, r4) = unbounded::(); - let (s5, r5) = unbounded::(); - - s5.send(5).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&r3); - let oper4 = sel.recv(&r4); - let oper5 = sel.recv(&r5); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => panic!(), - i if i == oper3 => panic!(), - i if i == oper4 => panic!(), - i if i == oper5 => assert_eq!(oper.recv(&r5), Ok(5)), - _ => unreachable!(), - } -} - -#[test] -fn disconnected() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - drop(s1); - thread::sleep(ms(500)); - s2.send(5).unwrap(); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r1).is_err()), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - 
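For orientation, the vendored tests removed above revolve around crossbeam-channel's `select!` macro, including its non-blocking `default` and timed `default(timeout)` arms. A minimal sketch of that usage, assuming the crossbeam-channel 0.4 API that cargo 0.44.1 vendored and an illustrative `i32` message type (the concrete types here are assumptions, not taken from the diff):

    use std::time::Duration;
    use crossbeam_channel::{select, unbounded};

    fn main() {
        let (s, r) = unbounded::<i32>();
        s.send(7).unwrap();

        // A ready receive arm fires immediately; a bare `default` arm would make
        // the select non-blocking, while `default(timeout)` bounds the wait.
        select! {
            recv(r) -> msg => assert_eq!(msg, Ok(7)),
            default(Duration::from_millis(100)) => panic!("nothing received in time"),
        }
    }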
r2.recv().unwrap(); - }) - .unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r1).is_err()), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - drop(s2); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r2).is_err()), - _ => unreachable!(), - }, - } - }) - .unwrap(); -} - -#[test] -fn default() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - let mut sel = Select::new(); - let _oper1 = sel.recv(&r1); - let _oper2 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(_) => panic!(), - } - - drop(s1); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r1).is_err()), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - }, - } - - let mut sel = Select::new(); - let _oper1 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(_) => panic!(), - } - - let mut sel = Select::new(); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(_) => panic!(), - } -} - -#[test] -fn timeout() { - let (_s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(1500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - }, - } - }) - .unwrap(); - - scope(|scope| { - let (s, r) = unbounded::(); - - scope.spawn(move |_| { - thread::sleep(ms(500)); - drop(s); - }); - - let mut sel = Select::new(); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - }, - } - } - Ok(_) => unreachable!(), - } - }) - .unwrap(); -} - -#[test] -fn default_when_disconnected() { - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - }, - } 
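The removed select.rs tests follow the same `Select` builder pattern throughout: register operations, wait for one to become ready, then complete the chosen operation through its handle. A condensed sketch of that pattern, again with an assumed `i32` payload:

    use crossbeam_channel::{unbounded, Select};

    fn main() {
        let (s1, r1) = unbounded::<i32>();
        let (_s2, r2) = unbounded::<i32>();
        s1.send(1).unwrap();

        let mut sel = Select::new();
        let oper1 = sel.recv(&r1); // registration returns the operation's index
        let oper2 = sel.recv(&r2);

        // `select()` blocks; `try_select()` and `select_timeout(..)` are the
        // non-blocking and bounded variants exercised by these tests.
        let oper = sel.select();
        match oper.index() {
            i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)),
            i if i == oper2 => drop(oper.recv(&r2)),
            _ => unreachable!(),
        }
    }

The completion call (`oper.recv`/`oper.send`) must target the same channel that was registered under that index, which is why every test matches on `oper.index()` before finishing the operation.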
- - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - }, - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.send(&s, 0).is_err()), - _ => unreachable!(), - }, - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.send(&s, 0).is_err()), - _ => unreachable!(), - }, - } -} - -#[test] -fn default_only() { - let start = Instant::now(); - - let mut sel = Select::new(); - let oper = sel.try_select(); - assert!(oper.is_err()); - let now = Instant::now(); - assert!(now - start <= ms(50)); - - let start = Instant::now(); - let mut sel = Select::new(); - let oper = sel.select_timeout(ms(500)); - assert!(oper.is_err()); - let now = Instant::now(); - assert!(now - start >= ms(450)); - assert!(now - start <= ms(550)); -} - -#[test] -fn unblocks() { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - }, - } - }) - .unwrap(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - assert_eq!(r1.recv().unwrap(), 1); - }); - - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper2 = sel.send(&s2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => oper.send(&s1, 1).unwrap(), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - }) - .unwrap(); -} - -#[test] -fn both_ready() { - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s1.send(1).unwrap(); - assert_eq!(r2.recv().unwrap(), 2); - }); - - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), - i if i == oper2 => oper.send(&s2, 2).unwrap(), - _ => unreachable!(), - } - } - }) - .unwrap(); -} - -#[test] -fn loop_try() { - const RUNS: usize = 20; - - for _ in 0..RUNS { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - let (s_end, r_end) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| loop { - let mut done = false; - - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => { - let _ = oper.send(&s1, 1); - done = true; - } - _ => unreachable!(), - }, - } - if done { - break; - } - - let mut sel = Select::new(); - let oper1 = sel.recv(&r_end); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => { 
- let _ = oper.recv(&r_end); - done = true; - } - _ => unreachable!(), - }, - } - if done { - break; - } - }); - - scope.spawn(|_| loop { - if let Ok(x) = r2.try_recv() { - assert_eq!(x, 2); - break; - } - - let mut done = false; - let mut sel = Select::new(); - let oper1 = sel.recv(&r_end); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => { - let _ = oper.recv(&r_end); - done = true; - } - _ => unreachable!(), - }, - } - if done { - break; - } - }); - - scope.spawn(|_| { - thread::sleep(ms(500)); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), - i if i == oper2 => assert!(oper.send(&s2, 2).is_ok()), - _ => unreachable!(), - }, - } - - drop(s_end); - }); - }) - .unwrap(); - } -} - -#[test] -fn cloning1() { - scope(|scope| { - let (s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (s3, r3) = unbounded::<()>(); - - scope.spawn(move |_| { - r3.recv().unwrap(); - drop(s1.clone()); - assert!(r3.try_recv().is_err()); - s1.send(1).unwrap(); - r3.recv().unwrap(); - }); - - s3.send(()).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => drop(oper.recv(&r1)), - i if i == oper2 => drop(oper.recv(&r2)), - _ => unreachable!(), - } - - s3.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn cloning2() { - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (_s3, _r3) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => drop(oper.recv(&r2)), - _ => unreachable!(), - } - }); - - thread::sleep(ms(500)); - drop(s1.clone()); - s2.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn preflight1() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => drop(oper.recv(&r)), - _ => unreachable!(), - } -} - -#[test] -fn preflight2() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r), Ok(())), - _ => unreachable!(), - } - - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); -} - -#[test] -fn preflight3() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - r.recv().unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - } -} - -#[test] -fn duplicate_operations() { - let (s, r) = unbounded::(); - let hit = vec![Cell::new(false); 4]; - - while hit.iter().map(|h| h.get()).any(|hit| !hit) { - let mut sel = Select::new(); - let oper0 = sel.recv(&r); - let oper1 = sel.recv(&r); - let oper2 = sel.send(&s); - let oper3 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - i if i == oper0 => { - assert!(oper.recv(&r).is_ok()); - hit[0].set(true); - } - i if i == oper1 => { - 
assert!(oper.recv(&r).is_ok()); - hit[1].set(true); - } - i if i == oper2 => { - assert!(oper.send(&s, 0).is_ok()); - hit[2].set(true); - } - i if i == oper3 => { - assert!(oper.send(&s, 0).is_ok()); - hit[3].set(true); - } - _ => unreachable!(), - } - } -} - -#[test] -fn nesting() { - let (s, r) = unbounded::(); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert!(oper.send(&s, 0).is_ok()); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert_eq!(oper.recv(&r), Ok(0)); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert!(oper.send(&s, 1).is_ok()); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert_eq!(oper.recv(&r), Ok(1)); - } - _ => unreachable!(), - } - } - _ => unreachable!(), - } - } - _ => unreachable!(), - } - } - _ => unreachable!(), - } -} - -#[test] -fn stress_recv() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded(); - let (s2, r2) = bounded(5); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - r3.recv().unwrap(); - - s2.send(i).unwrap(); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), - ix if ix == oper2 => assert_eq!(oper.recv(&r2), Ok(i)), - _ => unreachable!(), - } - - s3.send(()).unwrap(); - } - } - }) - .unwrap(); -} - -#[test] -fn stress_send() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r1.recv().unwrap(), i); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper2 = sel.send(&s2); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert!(oper.send(&s1, i).is_ok()), - ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), - _ => unreachable!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_mixed() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), - _ => unreachable!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 20; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - let done = false; - while !done { - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = 
sel.select_timeout(ms(100)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - ix if ix == oper1 => { - assert!(oper.send(&s, i).is_ok()); - break; - } - _ => unreachable!(), - }, - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - let mut done = false; - while !done { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - ix if ix == oper1 => { - assert_eq!(oper.recv(&r), Ok(i)); - done = true; - } - _ => unreachable!(), - }, - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn send_recv_same_channel() { - let (s, r) = bounded::(0); - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper2 = sel.recv(&r); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - ix if ix == oper1 => panic!(), - ix if ix == oper2 => panic!(), - _ => unreachable!(), - }, - } - - let (s, r) = unbounded::(); - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper2 = sel.recv(&r); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - ix if ix == oper1 => assert!(oper.send(&s, 0).is_ok()), - ix if ix == oper2 => panic!(), - _ => unreachable!(), - }, - } -} - -#[test] -fn matching() { - const THREADS: usize = 44; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper2 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), - _ => unreachable!(), - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn matching_with_leftover() { - const THREADS: usize = 55; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper2 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), - _ => unreachable!(), - } - }); - } - s.send(!0).unwrap(); - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn channel_through_channel() { - const COUNT: usize = 1000; - - type T = Box; - - for cap in 0..3 { - let (s, r) = bounded::(cap); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(cap); - let new_r: T = Box::new(Some(new_r)); - - { - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert!(oper.send(&s, new_r).is_ok()), - _ => unreachable!(), - } - } - - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - let new = { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => oper - .recv(&r) - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap(), - _ => unreachable!(), - } - }; - r = new; - } - }); - }) - .unwrap(); - } -} - -#[test] -fn linearizable_try() { - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = 
bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => unreachable!(), - Ok(oper) => match oper.index() { - ix if ix == oper1 => assert!(oper.recv(&r1).is_ok()), - ix if ix == oper2 => assert!(oper.recv(&r2).is_ok()), - _ => unreachable!(), - }, - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn linearizable_timeout() { - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(0)); - match oper { - Err(_) => unreachable!(), - Ok(oper) => match oper.index() { - ix if ix == oper1 => assert!(oper.recv(&r1).is_ok()), - ix if ix == oper2 => assert!(oper.recv(&r2).is_ok()), - _ => unreachable!(), - }, - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn fairness1() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let hits = vec![Cell::new(0usize); 4]; - for _ in 0..COUNT { - let after = after(ms(0)); - let tick = tick(ms(0)); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&after); - let oper4 = sel.recv(&tick); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - oper.recv(&r1).unwrap(); - hits[0].set(hits[0].get() + 1); - } - i if i == oper2 => { - oper.recv(&r2).unwrap(); - hits[1].set(hits[1].get() + 1); - } - i if i == oper3 => { - oper.recv(&after).unwrap(); - hits[2].set(hits[2].get() + 1); - } - i if i == oper4 => { - oper.recv(&tick).unwrap(); - hits[3].set(hits[3].get() + 1); - } - _ => unreachable!(), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness2() { - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = bounded::<()>(1); - let (s3, r3) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - let mut sel = Select::new(); - let mut oper1 = None; - let mut oper2 = None; - if s1.is_empty() { - oper1 = Some(sel.send(&s1)); - } - if s2.is_empty() { - oper2 = Some(sel.send(&s2)); - } - let oper3 = sel.send(&s3); - let oper = sel.select(); - match oper.index() { - i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()), - i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()), - i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()), - _ => unreachable!(), - } - } - }); - - let hits = 
vec![Cell::new(0usize); 3]; - for _ in 0..COUNT { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&r3); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - oper.recv(&r1).unwrap(); - hits[0].set(hits[0].get() + 1); - } - i if i == oper2 => { - oper.recv(&r2).unwrap(); - hits[1].set(hits[1].get() + 1); - } - i if i == oper3 => { - oper.recv(&r3).unwrap(); - hits[2].set(hits[2].get() + 1); - } - _ => unreachable!(), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 50)); - }) - .unwrap(); -} - -#[test] -fn sync_and_clone() { - const THREADS: usize = 20; - - let (s, r) = &bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper2 = sel.send(&s); - let sel = &sel; - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - let mut sel = sel.clone(); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), - _ => unreachable!(), - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn send_and_clone() { - const THREADS: usize = 20; - - let (s, r) = &bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper2 = sel.send(&s); - - scope(|scope| { - for i in 0..THREADS { - let mut sel = sel.clone(); - scope.spawn(move |_| { - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(&r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s, i).is_ok()), - _ => unreachable!(), - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn reuse() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - - for i in 0..COUNT { - for _ in 0..2 { - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), - _ => unreachable!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/thread_locals.rs cargo-0.47.0/vendor/crossbeam-channel/tests/thread_locals.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/thread_locals.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/thread_locals.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -//! Tests that make sure accessing thread-locals while exiting the thread doesn't cause panics. - -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; - -use std::thread; -use std::time::Duration; - -use crossbeam_channel::unbounded; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -#[cfg_attr(target_os = "macos", ignore = "TLS is destroyed too early on macOS")] -fn use_while_exiting() { - struct Foo; - - impl Drop for Foo { - fn drop(&mut self) { - // A blocking operation after the thread-locals have been dropped. This will attempt to - // use the thread-locals and must not panic. - let (_s, r) = unbounded::<()>(); - select! 
{ - recv(r) -> _ => {} - default(ms(100)) => {} - } - } - } - - thread_local! { - static FOO: Foo = Foo; - } - - let (s, r) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(|_| { - // First initialize `FOO`, then the thread-locals related to crossbeam-channel. - FOO.with(|_| ()); - r.recv().unwrap(); - // At thread exit, thread-locals related to crossbeam-channel get dropped first and - // `FOO` is dropped last. - }); - - scope.spawn(|_| { - thread::sleep(ms(100)); - s.send(()).unwrap(); - }); - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/tick.rs cargo-0.47.0/vendor/crossbeam-channel/tests/tick.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/tick.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/tick.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,353 +0,0 @@ -//! Tests for the tick channel flavor. - -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; -extern crate rand; - -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, tick, Select, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn fire() { - let start = Instant::now(); - let r = tick(ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(100)); - - let fired = r.try_recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired >= ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - - select! { - recv(r) -> _ => panic!(), - default => {} - } - - select! { - recv(r) -> _ => {} - recv(tick(ms(200))) -> _ => panic!(), - } -} - -#[test] -fn intervals() { - let start = Instant::now(); - let r = tick(ms(50)); - - let t1 = r.recv().unwrap(); - assert!(start + ms(50) <= t1); - assert!(start + ms(100) > t1); - - thread::sleep(ms(300)); - let t2 = r.try_recv().unwrap(); - assert!(start + ms(100) <= t2); - assert!(start + ms(150) > t2); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - let t3 = r.recv().unwrap(); - assert!(start + ms(400) <= t3); - assert!(start + ms(450) > t3); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn capacity() { - const COUNT: usize = 10; - - for i in 0..COUNT { - let r = tick(ms(i as u64)); - assert_eq!(r.capacity(), Some(1)); - } -} - -#[test] -fn len_empty_full() { - let r = tick(ms(50)); - - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); - - thread::sleep(ms(100)); - - assert_eq!(r.len(), 1); - assert_eq!(r.is_empty(), false); - assert_eq!(r.is_full(), true); - - r.try_recv().unwrap(); - - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), false); -} - -#[test] -fn try_recv() { - let r = tick(ms(200)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(100)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_ok()); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_ok()); - assert!(r.try_recv().is_err()); -} - -#[test] -fn recv() { - let start = Instant::now(); - let r = tick(ms(50)); - - let fired = r.recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired < fired - start); - - assert_eq!(r.try_recv(), 
Err(TryRecvError::Empty)); -} - -#[test] -fn recv_timeout() { - let start = Instant::now(); - let r = tick(ms(200)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(100)); - assert!(now - start <= ms(150)); - - let fired = r.recv_timeout(ms(200)).unwrap(); - assert!(fired - start >= ms(200)); - assert!(fired - start <= ms(250)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(300)); - assert!(now - start <= ms(350)); - - let fired = r.recv_timeout(ms(200)).unwrap(); - assert!(fired - start >= ms(400)); - assert!(fired - start <= ms(450)); -} - -#[test] -fn recv_two() { - let r1 = tick(ms(50)); - let r2 = tick(ms(50)); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..10 { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - } - }); - scope.spawn(|_| { - for _ in 0..10 { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - } - }); - }) - .unwrap(); -} - -#[test] -fn recv_race() { - select! { - recv(tick(ms(50))) -> _ => {} - recv(tick(ms(100))) -> _ => panic!(), - } - - select! { - recv(tick(ms(100))) -> _ => panic!(), - recv(tick(ms(50))) -> _ => {} - } -} - -#[test] -fn stress_default() { - const COUNT: usize = 10; - - for _ in 0..COUNT { - select! { - recv(tick(ms(0))) -> _ => {} - default => panic!(), - } - } - - for _ in 0..COUNT { - select! { - recv(tick(ms(100))) -> _ => panic!(), - default => {} - } - } -} - -#[test] -fn select() { - const THREADS: usize = 4; - - let hits = AtomicUsize::new(0); - let r1 = tick(ms(200)); - let r2 = tick(ms(300)); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let timeout = after(ms(1100)); - loop { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&timeout); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - oper.recv(&r1).unwrap(); - hits.fetch_add(1, Ordering::SeqCst); - } - i if i == oper2 => { - oper.recv(&r2).unwrap(); - hits.fetch_add(1, Ordering::SeqCst); - } - i if i == oper3 => { - oper.recv(&timeout).unwrap(); - break; - } - _ => unreachable!(), - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), 8); -} - -#[test] -fn ready() { - const THREADS: usize = 4; - - let hits = AtomicUsize::new(0); - let r1 = tick(ms(200)); - let r2 = tick(ms(300)); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let timeout = after(ms(1100)); - 'outer: loop { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&timeout); - loop { - match sel.ready() { - 0 => { - if r1.try_recv().is_ok() { - hits.fetch_add(1, Ordering::SeqCst); - break; - } - } - 1 => { - if r2.try_recv().is_ok() { - hits.fetch_add(1, Ordering::SeqCst); - break; - } - } - 2 => { - if timeout.try_recv().is_ok() { - break 'outer; - } - } - _ => unreachable!(), - } - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), 8); -} - -#[test] -fn fairness() { - const COUNT: usize = 30; - - for &dur in &[0, 1] { - let mut hits = [0usize; 2]; - - for _ in 0..COUNT { - let r1 = tick(ms(dur)); - let r2 = tick(ms(dur)); - - for _ in 0..COUNT { - select! 
{ - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 30; - - for &dur in &[0, 1] { - let mut hits = [0usize; 5]; - - for _ in 0..COUNT { - let r = tick(ms(dur)); - - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} diff -Nru cargo-0.44.1/vendor/crossbeam-channel/tests/zero.rs cargo-0.47.0/vendor/crossbeam-channel/tests/zero.rs --- cargo-0.44.1/vendor/crossbeam-channel/tests/zero.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/crossbeam-channel/tests/zero.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,559 +0,0 @@ -//! Tests for the zero channel flavor. - -#[macro_use] -extern crate crossbeam_channel; -extern crate crossbeam_utils; -extern crate rand; - -use std::any::Any; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{bounded, Receiver}; -use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; -use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - let (s, r) = bounded(0); - assert_eq!(s.try_send(7), Err(TrySendError::Full(7))); - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn capacity() { - let (s, r) = bounded::<()>(0); - assert_eq!(s.capacity(), Some(0)); - assert_eq!(r.capacity(), Some(0)); -} - -#[test] -fn len_empty_full() { - let (s, r) = bounded(0); - - assert_eq!(s.len(), 0); - assert_eq!(s.is_empty(), true); - assert_eq!(s.is_full(), true); - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), true); - - scope(|scope| { - scope.spawn(|_| s.send(0).unwrap()); - scope.spawn(|_| r.recv().unwrap()); - }) - .unwrap(); - - assert_eq!(s.len(), 0); - assert_eq!(s.is_empty(), true); - assert_eq!(s.is_full(), true); - assert_eq!(r.len(), 0); - assert_eq!(r.is_empty(), true); - assert_eq!(r.is_full(), true); -} - -#[test] -fn try_recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(1500)); - assert_eq!(r.try_recv(), Ok(7)); - thread::sleep(ms(500)); - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Ok(7)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(9)); - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - s.send(8).unwrap(); - s.send(9).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); - assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); - assert_eq!( - r.recv_timeout(ms(1000)), - Err(RecvTimeoutError::Disconnected) - ); - }); - scope.spawn(move |_| { - 
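The tick.rs tests removed just above hinge on one property of `tick`: the channel buffers at most one pending `Instant`, and a new one only becomes available once the period has elapsed. A small sketch of that behaviour, assuming the same crossbeam-channel 0.4 API:

    use std::time::Duration;
    use crossbeam_channel::{tick, TryRecvError};

    fn main() {
        let ticker = tick(Duration::from_millis(50));
        assert_eq!(ticker.capacity(), Some(1));

        // Nothing is available before the first period elapses...
        assert_eq!(ticker.try_recv(), Err(TryRecvError::Empty));

        // ...but `recv` blocks until the next tick and yields its timestamp.
        let at = ticker.recv().unwrap();
        println!("first tick fired at {:?}", at);
    }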
thread::sleep(ms(1500)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.try_send(7), Err(TrySendError::Full(7))); - thread::sleep(ms(1500)); - assert_eq!(s.try_send(8), Ok(())); - thread::sleep(ms(500)); - assert_eq!(s.try_send(9), Err(TrySendError::Disconnected(9))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - }); - }) - .unwrap(); -} - -#[test] -fn send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - s.send(7).unwrap(); - thread::sleep(ms(1000)); - s.send(8).unwrap(); - thread::sleep(ms(1000)); - s.send(9).unwrap(); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - assert_eq!(r.recv(), Ok(7)); - assert_eq!(r.recv(), Ok(8)); - assert_eq!(r.recv(), Ok(9)); - }); - }) - .unwrap(); -} - -#[test] -fn send_timeout() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!( - s.send_timeout(7, ms(1000)), - Err(SendTimeoutError::Timeout(7)) - ); - assert_eq!(s.send_timeout(8, ms(1000)), Ok(())); - assert_eq!( - s.send_timeout(9, ms(1000)), - Err(SendTimeoutError::Disconnected(9)) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - assert_eq!(r.recv(), Ok(8)); - }); - }) - .unwrap(); -} - -#[test] -fn len() { - const COUNT: usize = 25_000; - - let (s, r) = bounded(0); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - assert_eq!(r.len(), 0); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - assert_eq!(s.len(), 0); - } - }); - }) - .unwrap(); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); -} - -#[test] -fn disconnect_wakes_sender() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.send(()), Err(SendError(()))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(r); - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn spsc() { - const COUNT: usize = 100_000; - - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - } - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = bounded::(0); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = r.recv().unwrap(); - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn stress_oneshot() { - const COUNT: usize = 10_000; - - for _ in 0..COUNT { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(|_| r.recv().unwrap()); - scope.spawn(|_| s.send(0).unwrap()); - }) - .unwrap(); - } -} - -#[test] -fn stress_iter() { - const COUNT: usize = 1000; - - let (request_s, request_r) = bounded(0); - let (response_s, response_r) = 
bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == COUNT { - return; - } - } - let _ = request_s.try_send(()); - } - }); - - for _ in request_r.iter() { - if response_s.send(1).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 100; - - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(()) = s.send_timeout(i, ms(10)) { - break; - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(x) = r.recv_timeout(ms(10)) { - assert_eq!(x, i); - break; - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn drops() { - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..100 { - let steps = rng.gen_range(0, 3_000); - - DROPS.store(0, Ordering::SeqCst); - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - r.recv().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - s.send(DropCounter).unwrap(); - } - }); - }) - .unwrap(); - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(s); - drop(r); - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - } -} - -#[test] -fn fairness() { - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(0); - let (s2, r2) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }); - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - send(s1, ()) -> _ => hits[0] += 1, - send(s2, ()) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }) - .unwrap(); -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 10_000; - - let (s, r) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }); - - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - send(s, ()) -> _ => hits[0] += 1, - send(s, ()) -> _ => hits[1] += 1, - send(s, ()) -> _ => hits[2] += 1, - send(s, ()) -> _ => hits[3] += 1, - send(s, ()) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }) - .unwrap(); -} - -#[test] -fn recv_in_send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(100)); - r.recv() - }); - - scope.spawn(|_| { - thread::sleep(ms(500)); - s.send(()).unwrap(); - }); - - select! 
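The zero-capacity flavour covered by these removed tests is a rendezvous channel: a send on `bounded(0)` completes only while a receiver is waiting at the same moment, so `try_send` on an idle channel reports `Full`. A brief sketch of that rendezvous, using crossbeam-utils scoped threads as the tests do (the `i32` payload is illustrative):

    use crossbeam_channel::{bounded, TrySendError};
    use crossbeam_utils::thread::scope;

    fn main() {
        let (s, r) = bounded::<i32>(0);

        // No receiver is waiting yet, so the non-blocking send fails.
        assert_eq!(s.try_send(7), Err(TrySendError::Full(7)));

        scope(|scope| {
            scope.spawn(|_| s.send(7).unwrap()); // blocks until the recv below pairs up
            scope.spawn(|_| assert_eq!(r.recv(), Ok(7)));
        })
        .unwrap();
    }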
{ - send(s, r.recv().unwrap()) -> _ => {} - } - }) - .unwrap(); -} - -#[test] -fn channel_through_channel() { - const COUNT: usize = 1000; - - type T = Box; - - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(0); - let new_r: T = Box::new(Some(new_r)); - - s.send(new_r).unwrap(); - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = r - .recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - }); - }) - .unwrap(); -} diff -Nru cargo-0.44.1/vendor/curl/.cargo-checksum.json cargo-0.47.0/vendor/curl/.cargo-checksum.json --- cargo-0.44.1/vendor/curl/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"762e34611d2d5233a506a79072be944fddd057db2f18e04c0d6fa79e3fd466fd"} \ No newline at end of file +{"files":{},"package":"78baca05127a115136a9898e266988fc49ca7ea2c839f60fc6e1fc9df1599168"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/curl/Cargo.lock cargo-0.47.0/vendor/curl/Cargo.lock --- cargo-0.44.1/vendor/curl/Cargo.lock 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/curl/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,816 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "aho-corasick" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +dependencies = [ + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "base64" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +dependencies = [ + "byteorder", +] + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bumpalo" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "cc" +version = "1.0.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66120af515773fb005778dc07c261bd201ec8ce50bd6e7144c927753fe013381" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "cmake" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56268c17a6248366d66d4a47a3381369d068cce8409bb1716ed77ea32163bb" +dependencies = [ + "cc", +] + +[[package]] +name = "curl" +version = "0.4.33" +dependencies = [ + "anyhow", + "curl-sys", + "libc", + "mio", + "mio-extras", + "openssl-probe", + "openssl-sys", + "schannel", + "socket2", + "winapi 0.3.9", +] + +[[package]] +name = "curl-sys" +version = "0.4.36+curl-7.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68cad94adeb0c16558429c3c34a607acc9ea58e09a7b66310aabc9788fc5d721" +dependencies = [ + "cc", + "libc", + "libnghttp2-sys", + "libz-sys", + "mesalink", + "openssl-sys", + "pkg-config", + "vcpkg", + "winapi 0.3.9", +] + +[[package]] +name = "enum_to_u8_slice_derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8479a225129affae259452fd418b67af025ac86f60663a893baa407bc9897f43" +dependencies = [ + "quote 0.3.15", + "syn 0.11.11", +] + +[[package]] +name = "env_logger" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +dependencies = [ + "bitflags", + "fuchsia-zircon-sys", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" + +[[package]] +name = "hermit-abi" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +dependencies = [ + "libc", +] + +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error", +] + +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = 
"1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" + +[[package]] +name = "libnghttp2-sys" +version = "0.1.4+1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03624ec6df166e79e139a2310ca213283d6b3c30810c54844f307086d4488df1" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "libz-sys" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af67924b8dd885cccea261866c8ce5b74d239d272e154053ff927dae839f5ae9" +dependencies = [ + "cc", + "cmake", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "lock_api" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" + +[[package]] +name = "mesalink" +version = "1.1.0-cratesio" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05616fdd96cc48e233f660ce28e936950163b21f28bde25649acf55de411970a" +dependencies = [ + "base64", + "bitflags", + "enum_to_u8_slice_derive", + "env_logger", + "lazy_static", + "libc", + "parking_lot", + "ring", + "rustls", + "sct", + "untrusted", + "walkdir", + "webpki", + "webpki-roots", +] + +[[package]] +name = "mio" +version = "0.6.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +dependencies = [ + "cfg-if", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow", + "net2", + "slab", + "winapi 0.2.8", +] + +[[package]] +name = "mio-extras" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" +dependencies = [ + "lazycell", + "log", + "mio", + "slab", +] + +[[package]] +name = "miow" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] + +[[package]] +name = "net2" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +dependencies = [ + "cfg-if", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "once_cell" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" + +[[package]] +name = 
"openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "openssl-src" +version = "111.10.2+1.1.1g" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287fdb22e32b5b60624d4a5a7a02dbe82777f730ec0dbc42a0554326fef5a70" +dependencies = [ + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +dependencies = [ + "autocfg", + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "parking_lot" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +dependencies = [ + "lock_api", + "parking_lot_core", + "rustc_version", +] + +[[package]] +name = "parking_lot_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +dependencies = [ + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "rustc_version", + "smallvec", + "winapi 0.3.9", +] + +[[package]] +name = "pkg-config" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" + +[[package]] +name = "proc-macro2" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" +dependencies = [ + "unicode-xid 0.2.1", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "regex" +version = "1.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-syntax" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" + +[[package]] +name = "ring" +version = "0.16.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi 0.3.9", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "rustls" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +dependencies = [ + "base64", + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi 0.3.9", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "smallvec" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "socket2" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "winapi 0.3.9", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "syn" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +dependencies = [ + "quote 0.3.15", + "synom", + "unicode-xid 0.0.4", +] + +[[package]] +name = "syn" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4" +dependencies = [ + "proc-macro2", + "quote 1.0.7", + "unicode-xid 0.2.1", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" +dependencies = [ + "unicode-xid 0.0.4", +] + +[[package]] +name = "termcolor" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "unicode-xid" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "vcpkg" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" + +[[package]] +name = "walkdir" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +dependencies = [ + "same-file", + "winapi 0.3.9", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote 1.0.7", + "syn 1.0.38", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" +dependencies = [ + "quote 1.0.7", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" +dependencies = [ + "proc-macro2", + "quote 1.0.7", + "syn 1.0.38", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" + +[[package]] +name = "web-sys" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" +dependencies 
= [ + "webpki", +] + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] diff -Nru cargo-0.44.1/vendor/curl/Cargo.toml cargo-0.47.0/vendor/curl/Cargo.toml --- cargo-0.44.1/vendor/curl/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,21 +12,27 @@ [package] name = "curl" -version = "0.4.29" +version = "0.4.33" authors = ["Alex Crichton "] autotests = true description = "Rust bindings to libcurl for making HTTP requests" homepage = "https://github.com/alexcrichton/curl-rust" documentation = "https://docs.rs/curl" +readme = "README.md" categories = ["api-bindings", "web-programming::http-client"] license = "MIT" repository = "https://github.com/alexcrichton/curl-rust" +[[example]] +name = "ssl_proxy" +path = "examples/ssl_proxy.rs" +required-features = ["ssl"] + [[test]] name = "atexit" harness = false [dependencies.curl-sys] -version = "0.4.31" +version = "0.4.36" default-features = false [dependencies.libc] @@ -34,6 +40,9 @@ [dependencies.socket2] version = "0.3.7" +[dev-dependencies.anyhow] +version = "1.0.31" + [dev-dependencies.mio] version = "0.6" @@ -49,6 +58,7 @@ ssl = ["openssl-sys", "openssl-probe", "curl-sys/ssl"] static-curl = ["curl-sys/static-curl"] static-ssl = ["curl-sys/static-ssl"] +zlib-ng-compat = ["curl-sys/zlib-ng-compat", "static-curl"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] version = "0.1.2" optional = true diff -Nru cargo-0.44.1/vendor/curl/ci/Dockerfile-linux64-curl cargo-0.47.0/vendor/curl/ci/Dockerfile-linux64-curl --- cargo-0.44.1/vendor/curl/ci/Dockerfile-linux64-curl 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/ci/Dockerfile-linux64-curl 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -FROM ubuntu:14.04 +FROM ubuntu:16.04 RUN apt-get update RUN apt-get install -y --no-install-recommends \ diff -Nru cargo-0.44.1/vendor/curl/examples/ssl_proxy.rs cargo-0.47.0/vendor/curl/examples/ssl_proxy.rs 
--- cargo-0.44.1/vendor/curl/examples/ssl_proxy.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/curl/examples/ssl_proxy.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,29 @@ +extern crate anyhow; + +use anyhow::Result; + +fn main() -> Result<()> { + let mut handle = curl::easy::Easy::new(); + + let proxy_url = "https://fwdproxy"; + let proxy_port = 8082; + let cainfo = "/var/credentials/root/ca.pem"; + let sslcert = "/var/credentials/user/x509.pem"; + let sslkey = "/var/credentials/user/x509.pem"; + + handle.connect_timeout(std::time::Duration::from_secs(5))?; + handle.connect_only(true)?; + handle.verbose(true)?; + handle.url("https://www.google.com")?; + + handle.proxy(proxy_url)?; + handle.proxy_port(proxy_port)?; + handle.proxy_cainfo(&cainfo)?; + handle.proxy_sslcert(&sslcert)?; + handle.proxy_sslkey(&sslkey)?; + println!("ssl proxy setup done"); + + handle.perform()?; + println!("connected done"); + Ok(()) +} diff -Nru cargo-0.44.1/vendor/curl/.pc/disable-mesalink.patch/Cargo.toml cargo-0.47.0/vendor/curl/.pc/disable-mesalink.patch/Cargo.toml --- cargo-0.44.1/vendor/curl/.pc/disable-mesalink.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/.pc/disable-mesalink.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,21 +12,27 @@ [package] name = "curl" -version = "0.4.29" +version = "0.4.33" authors = ["Alex Crichton "] autotests = true description = "Rust bindings to libcurl for making HTTP requests" homepage = "https://github.com/alexcrichton/curl-rust" documentation = "https://docs.rs/curl" +readme = "README.md" categories = ["api-bindings", "web-programming::http-client"] license = "MIT" repository = "https://github.com/alexcrichton/curl-rust" +[[example]] +name = "ssl_proxy" +path = "examples/ssl_proxy.rs" +required-features = ["ssl"] + [[test]] name = "atexit" harness = false [dependencies.curl-sys] -version = "0.4.31" +version = "0.4.36" default-features = false [dependencies.libc] @@ -34,6 +40,9 @@ [dependencies.socket2] version = "0.3.7" +[dev-dependencies.anyhow] +version = "1.0.31" + [dev-dependencies.mio] version = "0.6" @@ -50,6 +59,7 @@ ssl = ["openssl-sys", "openssl-probe", "curl-sys/ssl"] static-curl = ["curl-sys/static-curl"] static-ssl = ["curl-sys/static-ssl"] +zlib-ng-compat = ["curl-sys/zlib-ng-compat", "static-curl"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] version = "0.1.2" optional = true diff -Nru cargo-0.44.1/vendor/curl/README.md cargo-0.47.0/vendor/curl/README.md --- cargo-0.44.1/vendor/curl/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,11 @@ # curl-rust -libcurl bindings for Rust +[libcurl] bindings for Rust -[Documentation](https://docs.rs/curl) +[![Latest Version](https://img.shields.io/crates/v/curl.svg)](https://crates.io/crates/curl) +[![Documentation](https://docs.rs/curl/badge.svg)](https://docs.rs/curl) +[![License](https://img.shields.io/github/license/alexcrichton/curl-rust.svg)](LICENSE) +[![Build](https://github.com/alexcrichton/curl-rust/workflows/CI/badge.svg)](https://github.com/alexcrichton/curl-rust/actions) ## Quick Start @@ -145,8 +148,7 @@ That means most likely, that curl was linked against `libcurl-nss.so` due to installed libcurl NSS development files, and that the required library `libnsspem.so` is missing. 
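The `ssl_proxy` example above layers the new `proxy_cainfo`/`proxy_sslcert`/`proxy_sslkey` options on top of an ordinary `Easy` transfer. For contrast, here is a minimal sketch of a plain request using the crate's basic `Easy`/`Transfer` API (nothing in it is specific to this release; the URL is a placeholder):

```rust
use curl::easy::Easy;

fn main() -> Result<(), curl::Error> {
    let mut handle = Easy::new();
    handle.url("https://www.rust-lang.org/")?;

    // Collect the response body in memory; without a write callback,
    // libcurl writes the body to stdout.
    let mut body = Vec::new();
    {
        let mut transfer = handle.transfer();
        transfer.write_function(|data| {
            body.extend_from_slice(data);
            Ok(data.len())
        })?;
        transfer.perform()?;
    }

    println!("received {} bytes", body.len());
    Ok(())
}
```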
See also the curl man page: "If curl is built -against the NSS SSL library, the NSS PEM PKCS#11 module (libnsspem.so) needs to -be available for this option to work properly." +against the NSS SSL library, the NSS PEM PKCS#11 module (`libnsspem.so`) needs to be available for this option to work properly." In order to avoid this failure you can either @@ -156,10 +158,12 @@ ## License -The `curl-rust` crate is licensed under the MIT license, see `LICENSE` for more +The `curl-rust` crate is licensed under the MIT license, see [`LICENSE`](LICENSE) for more details. +[libcurl]: https://curl.haxx.se/libcurl/ +[MesaLink]: https://mesalink.io/ [OpenSSL]: https://www.openssl.org/ [Rustls]: https://github.com/ctz/rustls [Schannel]: https://docs.microsoft.com/en-us/windows/win32/com/schannel diff -Nru cargo-0.44.1/vendor/curl/src/easy/handler.rs cargo-0.47.0/vendor/curl/src/easy/handler.rs --- cargo-0.44.1/vendor/curl/src/easy/handler.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/src/easy/handler.rs 2020-10-01 21:38:28.000000000 +0000 @@ -383,6 +383,7 @@ handle: *mut curl_sys::CURL, header_list: Option, resolve_list: Option, + connect_to_list: Option, form: Option
, error_buf: RefCell>, handler: H, @@ -461,6 +462,17 @@ /// (Added in CURL 7.49.0) V2PriorKnowledge = curl_sys::CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE as isize, + /// Setting this value will make libcurl attempt to use HTTP/3 directly to + /// server given in the URL. Note that this cannot gracefully downgrade to + /// earlier HTTP version if the server doesn't support HTTP/3. + /// + /// For more reliably upgrading to HTTP/3, set the preferred version to + /// something lower and let the server announce its HTTP/3 support via + /// Alt-Svc:. + /// + /// (Added in CURL 7.66.0) + V3 = curl_sys::CURL_HTTP_VERSION_3 as isize, + /// Hidden variant to indicate that this enum should not be matched on, it /// may grow over time. #[doc(hidden)] @@ -614,6 +626,7 @@ handle: handle, header_list: None, resolve_list: None, + connect_to_list: None, form: None, error_buf: RefCell::new(vec![0; curl_sys::CURL_ERROR_SIZE]), handler: handler, @@ -780,18 +793,45 @@ self.setopt_long(curl_sys::CURLOPT_WILDCARDMATCH, m as c_long) } - /// Provides the unix domain socket which this handle will work with. + /// Provides the Unix domain socket which this handle will work with. /// - /// The string provided must be unix domain socket -encoded with the format: + /// The string provided must be a path to a Unix domain socket encoded with + /// the format: /// /// ```text /// /path/file.sock /// ``` + /// + /// By default this option is not set and corresponds to + /// [`CURLOPT_UNIX_SOCKET_PATH`](https://curl.haxx.se/libcurl/c/CURLOPT_UNIX_SOCKET_PATH.html). pub fn unix_socket(&mut self, unix_domain_socket: &str) -> Result<(), Error> { let socket = CString::new(unix_domain_socket)?; self.setopt_str(curl_sys::CURLOPT_UNIX_SOCKET_PATH, &socket) } + /// Provides the Unix domain socket which this handle will work with. + /// + /// The string provided must be a path to a Unix domain socket encoded with + /// the format: + /// + /// ```text + /// /path/file.sock + /// ``` + /// + /// This function is an alternative to [`Easy2::unix_socket`] that supports + /// non-UTF-8 paths and also supports disabling Unix sockets by setting the + /// option to `None`. + /// + /// By default this option is not set and corresponds to + /// [`CURLOPT_UNIX_SOCKET_PATH`](https://curl.haxx.se/libcurl/c/CURLOPT_UNIX_SOCKET_PATH.html). + pub fn unix_socket_path>(&mut self, path: Option

) -> Result<(), Error> { + if let Some(path) = path { + self.setopt_path(curl_sys::CURLOPT_UNIX_SOCKET_PATH, path.as_ref()) + } else { + self.setopt_ptr(curl_sys::CURLOPT_UNIX_SOCKET_PATH, 0 as _) + } + } + // ========================================================================= // Internal accessors @@ -847,6 +887,24 @@ self.setopt_long(curl_sys::CURLOPT_PORT, port as c_long) } + /// Connect to a specific host and port. + /// + /// Each single string should be written using the format + /// `HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT` where `HOST` is the host of + /// the request, `PORT` is the port of the request, `CONNECT-TO-HOST` is the + /// host name to connect to, and `CONNECT-TO-PORT` is the port to connect + /// to. + /// + /// The first string that matches the request's host and port is used. + /// + /// By default, this option is empty and corresponds to + /// [`CURLOPT_CONNECT_TO`](https://curl.haxx.se/libcurl/c/CURLOPT_CONNECT_TO.html). + pub fn connect_to(&mut self, list: List) -> Result<(), Error> { + let ptr = list::raw(&list); + self.inner.connect_to_list = Some(list); + self.setopt_ptr(curl_sys::CURLOPT_CONNECT_TO, ptr as *const _) + } + // /// Indicates whether sequences of `/../` and `/./` will be squashed or not. // /// // /// By default this option is `false` and corresponds to @@ -870,6 +928,46 @@ self.setopt_long(curl_sys::CURLOPT_PROXYPORT, port as c_long) } + /// Set CA certificate to verify peer against for proxy. + /// + /// By default this value is not set and corresponds to + /// `CURLOPT_PROXY_CAINFO`. + pub fn proxy_cainfo(&mut self, cainfo: &str) -> Result<(), Error> { + let cainfo = CString::new(cainfo)?; + self.setopt_str(curl_sys::CURLOPT_PROXY_CAINFO, &cainfo) + } + + /// Specify a directory holding CA certificates for proxy. + /// + /// The specified directory should hold multiple CA certificates to verify + /// the HTTPS proxy with. If libcurl is built against OpenSSL, the + /// certificate directory must be prepared using the OpenSSL `c_rehash` + /// utility. + /// + /// By default this value is not set and corresponds to + /// `CURLOPT_PROXY_CAPATH`. + pub fn proxy_capath>(&mut self, path: P) -> Result<(), Error> { + self.setopt_path(curl_sys::CURLOPT_PROXY_CAPATH, path.as_ref()) + } + + /// Set client certificate for proxy. + /// + /// By default this value is not set and corresponds to + /// `CURLOPT_PROXY_SSLCERT`. + pub fn proxy_sslcert(&mut self, sslcert: &str) -> Result<(), Error> { + let sslcert = CString::new(sslcert)?; + self.setopt_str(curl_sys::CURLOPT_PROXY_SSLCERT, &sslcert) + } + + /// Set private key for HTTPS proxy. + /// + /// By default this value is not set and corresponds to + /// `CURLOPT_PROXY_SSLKEY`. + pub fn proxy_sslkey(&mut self, sslkey: &str) -> Result<(), Error> { + let sslkey = CString::new(sslkey)?; + self.setopt_str(curl_sys::CURLOPT_PROXY_SSLKEY, &sslkey) + } + /// Indicates the type of proxy being used. /// /// By default this option is `ProxyType::Http` and corresponds to diff -Nru cargo-0.44.1/vendor/curl/src/easy/handle.rs cargo-0.47.0/vendor/curl/src/easy/handle.rs --- cargo-0.44.1/vendor/curl/src/easy/handle.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/src/easy/handle.rs 2020-10-01 21:38:28.000000000 +0000 @@ -165,6 +165,11 @@ self.inner.unix_socket(unix_domain_socket) } + /// Same as [`Easy2::unix_socket_path`](struct.Easy2.html#method.unix_socket_path) + pub fn unix_socket_path>(&mut self, path: Option

) -> Result<(), Error> { + self.inner.unix_socket_path(path) + } + // ========================================================================= // Callback options @@ -553,6 +558,11 @@ self.inner.port(port) } + /// Same as [`Easy2::connect_to`](struct.Easy2.html#method.connect_to) + pub fn connect_to(&mut self, list: List) -> Result<(), Error> { + self.inner.connect_to(list) + } + /// Same as [`Easy2::proxy`](struct.Easy2.html#method.proxy) pub fn proxy(&mut self, url: &str) -> Result<(), Error> { self.inner.proxy(url) @@ -563,6 +573,26 @@ self.inner.proxy_port(port) } + /// Same as [`Easy2::proxy_cainfo`](struct.Easy2.html#method.proxy_cainfo) + pub fn proxy_cainfo(&mut self, cainfo: &str) -> Result<(), Error> { + self.inner.proxy_cainfo(cainfo) + } + + /// Same as [`Easy2::proxy_capath`](struct.Easy2.html#method.proxy_capath) + pub fn proxy_capath>(&mut self, path: P) -> Result<(), Error> { + self.inner.proxy_capath(path) + } + + /// Same as [`Easy2::proxy_sslcert`](struct.Easy2.html#method.proxy_sslcert) + pub fn proxy_sslcert(&mut self, sslcert: &str) -> Result<(), Error> { + self.inner.proxy_sslcert(sslcert) + } + + /// Same as [`Easy2::proxy_sslkey`](struct.Easy2.html#method.proxy_sslkey) + pub fn proxy_sslkey(&mut self, sslkey: &str) -> Result<(), Error> { + self.inner.proxy_sslkey(sslkey) + } + /// Same as [`Easy2::proxy_type`](struct.Easy2.html#method.proxy_type) pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> { self.inner.proxy_type(kind) diff -Nru cargo-0.44.1/vendor/curl/src/lib.rs cargo-0.47.0/vendor/curl/src/lib.rs --- cargo-0.44.1/vendor/curl/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -46,6 +46,12 @@ //! There is a large number of releases for libcurl, all with different sets of //! capabilities. Robust programs may wish to inspect `Version::get()` to test //! what features are implemented in the linked build of libcurl at runtime. +//! +//! # Initialization +//! +//! The underlying libcurl library must be initialized before use and has +//! certain requirements on how this is done. Check the documentation for +//! [`init`] for more details. #![deny(missing_docs, missing_debug_implementations)] #![doc(html_root_url = "https://docs.rs/curl/0.4")] @@ -78,34 +84,63 @@ /// Initializes the underlying libcurl library. /// -/// It's not required to call this before the library is used, but it's -/// recommended to do so as soon as the program starts. +/// The underlying libcurl library must be initialized before use, and must be +/// done so on the main thread before any other threads are created by the +/// program. This crate will do this for you automatically in the following +/// scenarios: +/// +/// - Creating a new [`Easy`][easy::Easy] or [`Multi`][multi::Multi] handle +/// - At program startup on Windows, macOS, Linux, Android, or FreeBSD systems +/// +/// This should be sufficient for most applications and scenarios, but in any +/// other case, it is strongly recommended that you call this function manually +/// as soon as your program starts. +/// +/// Calling this function more than once is harmless and has no effect. +#[inline] pub fn init() { + /// Used to prevent concurrent or duplicate initialization. static INIT: Once = Once::new(); - INIT.call_once(|| { - platform_init(); - unsafe { - assert_eq!(curl_sys::curl_global_init(curl_sys::CURL_GLOBAL_ALL), 0); - } - - // Note that we explicitly don't schedule a call to - // `curl_global_cleanup`. 
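The `connect_to` setter documented above takes a `List` whose entries use the `HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT` format. A minimal sketch, with placeholder host names and ports, that steers requests for `example.com:443` to a local endpoint:

```rust
use curl::easy::{Easy, List};

fn main() -> Result<(), curl::Error> {
    let mut handle = Easy::new();
    handle.url("https://example.com/")?;

    // Connect to 127.0.0.1:8443 whenever the request targets example.com:443.
    // Only the connection is redirected; the Host header and TLS verification
    // still use the original host name, matching CURLOPT_CONNECT_TO semantics.
    let mut connect_to = List::new();
    connect_to.append("example.com:443:127.0.0.1:8443")?;
    handle.connect_to(connect_to)?;

    handle.perform()
}
```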
The documentation for that function says - // - // > You must not call it when any other thread in the program (i.e. a - // > thread sharing the same memory) is running. This doesn't just mean - // > no other thread that is using libcurl. - // - // We can't ever be sure of that, so unfortunately we can't call the - // function. - }); - - #[cfg(need_openssl_init)] - fn platform_init() { - openssl_sys::init(); + + /// An exported constructor function. On supported platforms, this will be + /// invoked automatically before the program's `main` is called. + #[cfg_attr( + any(target_os = "linux", target_os = "freebsd", target_os = "android"), + link_section = ".init_array" + )] + #[cfg_attr(target_os = "macos", link_section = "__DATA,__mod_init_func")] + #[cfg_attr(target_os = "windows", link_section = ".CRT$XCU")] + static INIT_CTOR: extern "C" fn() = init_inner; + + /// This is the body of our constructor function. + #[cfg_attr( + any(target_os = "linux", target_os = "android"), + link_section = ".text.startup" + )] + extern "C" fn init_inner() { + INIT.call_once(|| { + #[cfg(need_openssl_init)] + openssl_sys::init(); + + unsafe { + assert_eq!(curl_sys::curl_global_init(curl_sys::CURL_GLOBAL_ALL), 0); + } + + // Note that we explicitly don't schedule a call to + // `curl_global_cleanup`. The documentation for that function says + // + // > You must not call it when any other thread in the program (i.e. + // > a thread sharing the same memory) is running. This doesn't just + // > mean no other thread that is using libcurl. + // + // We can't ever be sure of that, so unfortunately we can't call the + // function. + }); } - #[cfg(not(need_openssl_init))] - fn platform_init() {} + // We invoke our init function through our static to ensure the symbol isn't + // optimized away by a bug: https://github.com/rust-lang/rust/issues/47384 + INIT_CTOR(); } unsafe fn opt_str<'a>(ptr: *const libc::c_char) -> Option<&'a str> { diff -Nru cargo-0.44.1/vendor/curl/src/multi.rs cargo-0.47.0/vendor/curl/src/multi.rs --- cargo-0.44.1/vendor/curl/src/multi.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/src/multi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -696,19 +696,28 @@ } } - /// Attempt to close the multi handle and clean up all associated resources. + /// Does nothing and returns `Ok(())`. This method remains for backwards + /// compatibility. /// - /// Cleans up and removes a whole multi stack. It does not free or touch any - /// individual easy handles in any way - they still need to be closed - /// individually. + /// This method will be changed to take `self` in a future release. + #[doc(hidden)] + #[deprecated( + since = "0.4.30", + note = "cannot close safely without consuming self; \ + will be changed or removed in a future release" + )] pub fn close(&self) -> Result<(), MultiError> { - unsafe { cvt(curl_sys::curl_multi_cleanup(self.raw)) } + Ok(()) } /// Get a pointer to the raw underlying CURLM handle. 
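With the constructor-based initialization above, `curl_global_init` normally runs before `main` on Linux, macOS, Windows, Android, and FreeBSD; elsewhere the documentation still recommends calling `init` manually before any threads are spawned. A minimal sketch of doing that explicitly (the URL and the worker thread are placeholders):

```rust
fn main() {
    // Set up libcurl's global state on the main thread, before any other
    // threads exist. Calling `init` more than once is harmless.
    curl::init();

    let worker = std::thread::spawn(|| {
        let mut handle = curl::easy::Easy::new();
        handle.url("https://example.com/").unwrap();
        handle.perform().unwrap();
    });

    worker.join().unwrap();
}
```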
pub fn raw(&self) -> *mut curl_sys::CURLM { self.raw } + + unsafe fn close_impl(&self) -> Result<(), MultiError> { + cvt(curl_sys::curl_multi_cleanup(self.raw)) + } } fn cvt(code: curl_sys::CURLMcode) -> Result<(), MultiError> { @@ -727,7 +736,7 @@ impl Drop for Multi { fn drop(&mut self) { - let _ = self.close(); + let _ = unsafe { self.close_impl() }; } } diff -Nru cargo-0.44.1/vendor/curl/tests/easy.rs cargo-0.47.0/vendor/curl/tests/easy.rs --- cargo-0.44.1/vendor/curl/tests/easy.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/tests/easy.rs 2020-10-01 21:38:28.000000000 +0000 @@ -801,9 +801,30 @@ t!(h.borrow().perform()); } -// Stupid test to check if unix_socket is callable +#[cfg(not(windows))] #[test] fn check_unix_socket() { + let s = Server::new_unix(); + s.receive( + "\ + POST / HTTP/1.1\r\n\ + Host: localhost\r\n\ + Accept: */*\r\n\ + Content-Length: 5\r\n\ + Content-Type: application/x-www-form-urlencoded\r\n\ + \r\n\ + data\n", + ); + s.send( + "\ + HTTP/1.1 200 OK\r\n\ + \r\n", + ); + let mut h = handle(); - drop(h.unix_socket("/var/something.socks")); + t!(h.unix_socket(s.path())); + t!(h.url(&s.url("/"))); + t!(h.post(true)); + t!(h.post_fields_copy(b"data\n")); + t!(h.perform()); } diff -Nru cargo-0.44.1/vendor/curl/tests/server/mod.rs cargo-0.47.0/vendor/curl/tests/server/mod.rs --- cargo-0.44.1/vendor/curl/tests/server/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl/tests/server/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -4,12 +4,13 @@ use std::io::prelude::*; use std::io::BufReader; use std::net::{SocketAddr, TcpListener, TcpStream}; +use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; use std::thread; pub struct Server { messages: Option>, - addr: SocketAddr, + addr: Addr, thread: Option>, } @@ -18,8 +19,13 @@ Write(String), } -fn run(listener: &TcpListener, rx: &Receiver) { - let mut socket = BufReader::new(listener.accept().unwrap().0); +enum Addr { + Tcp(SocketAddr), + Unix(PathBuf), +} + +fn run(stream: impl Read + Write, rx: &Receiver) { + let mut socket = BufReader::new(stream); for msg in rx.iter() { match msg { Message::Read(ref expected) => { @@ -110,7 +116,7 @@ let mut dst = Vec::new(); t!(socket.read_to_end(&mut dst)); - assert!(dst.len() == 0); + assert_eq!(dst.len(), 0); } fn lines_match(expected: &str, mut actual: &str) -> bool { @@ -133,22 +139,43 @@ let listener = t!(TcpListener::bind("127.0.0.1:0")); let addr = t!(listener.local_addr()); let (tx, rx) = channel(); - let thread = thread::spawn(move || run(&listener, &rx)); + let thread = thread::spawn(move || run(listener.accept().unwrap().0, &rx)); + Server { + messages: Some(tx), + addr: Addr::Tcp(addr), + thread: Some(thread), + } + } + + #[cfg(not(windows))] + pub fn new_unix() -> Server { + use std::os::unix::net::UnixListener; + + let path = "/tmp/easy_server.sock"; + std::fs::remove_file(path).ok(); + let listener = t!(UnixListener::bind(path)); + let (tx, rx) = channel(); + let thread = thread::spawn(move || run(listener.incoming().next().unwrap().unwrap(), &rx)); Server { messages: Some(tx), - addr: addr, + addr: Addr::Unix(path.into()), thread: Some(thread), } } pub fn receive(&self, msg: &str) { - let msg = msg.replace("$PORT", &self.addr.port().to_string()); - self.msg(Message::Read(msg)); + self.msg(Message::Read(self.replace_port(msg))); + } + + fn replace_port(&self, msg: &str) -> String { + match &self.addr { + Addr::Tcp(addr) => msg.replace("$PORT", &addr.port().to_string()), + Addr::Unix(_) => msg.to_string(), + } } pub 
fn send(&self, msg: &str) { - let msg = msg.replace("$PORT", &self.addr.port().to_string()); - self.msg(Message::Write(msg)); + self.msg(Message::Write(self.replace_port(msg))); } fn msg(&self, msg: Message) { @@ -156,17 +183,35 @@ } pub fn addr(&self) -> &SocketAddr { - &self.addr + match &self.addr { + Addr::Tcp(addr) => addr, + Addr::Unix(_) => panic!("server is a UnixListener"), + } + } + + #[cfg(not(windows))] + pub fn path(&self) -> &str { + match &self.addr { + Addr::Tcp(_) => panic!("server is a TcpListener"), + Addr::Unix(p) => p.as_os_str().to_str().unwrap(), + } } pub fn url(&self, path: &str) -> String { - format!("http://{}{}", self.addr, path) + match &self.addr { + Addr::Tcp(addr) => format!("http://{}{}", addr, path), + Addr::Unix(_) => format!("http://localhost{}", path), + } } } impl Drop for Server { fn drop(&mut self) { - drop(TcpStream::connect(&self.addr)); + match &self.addr { + Addr::Tcp(addr) => drop(TcpStream::connect(addr)), + Addr::Unix(p) => t!(std::fs::remove_file(p)), + } + drop(self.messages.take()); let res = self.thread.take().unwrap().join(); if !thread::panicking() { diff -Nru cargo-0.44.1/vendor/curl-sys/build.rs cargo-0.47.0/vendor/curl-sys/build.rs --- cargo-0.44.1/vendor/curl-sys/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -148,6 +148,7 @@ .file("curl/lib/curl_threads.c") .file("curl/lib/dotdot.c") .file("curl/lib/doh.c") + .file("curl/lib/dynbuf.c") .file("curl/lib/easy.c") .file("curl/lib/escape.c") .file("curl/lib/file.c") @@ -196,11 +197,13 @@ .file("curl/lib/url.c") .file("curl/lib/urlapi.c") .file("curl/lib/version.c") + .file("curl/lib/vtls/keylog.c") .file("curl/lib/vtls/vtls.c") .file("curl/lib/warnless.c") .file("curl/lib/wildcard.c") .define("HAVE_GETADDRINFO", None) .define("HAVE_GETPEERNAME", None) + .define("HAVE_GETSOCKNAME", None) .warnings(false); if cfg!(feature = "protocol-ftp") { @@ -234,6 +237,11 @@ .file("curl/lib/vauth/vauth.c"); } + if !windows { + cfg.define("USE_UNIX_SOCKETS", None) + .define("HAVE_SYS_UN_H", None); + } + // Configure TLS backend. Since Cargo does not support mutually exclusive // features, make sure we only compile one vtls. 
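The build-script changes above define `USE_UNIX_SOCKETS` and `HAVE_SYS_UN_H` only for non-Windows targets. A stand-alone sketch of the same pattern, assuming `cc` as a build-dependency, a hypothetical `shim.c` source file, and the `TARGET` variable that Cargo sets for build scripts:

```rust
// build.rs (sketch): add preprocessor defines only for non-Windows targets.
use std::env;

fn main() {
    let target = env::var("TARGET").unwrap();
    let windows = target.contains("windows");

    let mut cfg = cc::Build::new();
    cfg.file("src/native/shim.c") // hypothetical C source
        .warnings(false);

    if !windows {
        // Unix domain sockets are only available off Windows.
        cfg.define("USE_UNIX_SOCKETS", None)
            .define("HAVE_SYS_UN_H", None);
    }

    cfg.compile("shim");
}
```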
if cfg!(feature = "mesalink") { @@ -284,7 +292,8 @@ .define("USE_THREADS_WIN32", None) .define("HAVE_IOCTLSOCKET_FIONBIO", None) .define("USE_WINSOCK", None) - .file("curl/lib/system_win32.c"); + .file("curl/lib/system_win32.c") + .file("curl/lib/curl_multibyte.c"); if cfg!(feature = "spnego") { cfg.file("curl/lib/vauth/spnego_sspi.c"); diff -Nru cargo-0.44.1/vendor/curl-sys/.cargo-checksum.json cargo-0.47.0/vendor/curl-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/curl-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"dcd62757cc4f5ab9404bc6ca9f0ae447e729a1403948ce5106bd588ceac6a3b0"} \ No newline at end of file +{"files":{},"package":"68cad94adeb0c16558429c3c34a607acc9ea58e09a7b66310aabc9788fc5d721"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/curl-sys/Cargo.toml cargo-0.47.0/vendor/curl-sys/Cargo.toml --- cargo-0.44.1/vendor/curl-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "curl-sys" -version = "0.4.31+curl-7.70.0" +version = "0.4.36+curl-7.71.1" authors = ["Alex Crichton "] build = "build.rs" links = "curl" @@ -32,9 +32,6 @@ version = "0.1.3" optional = true -[dependencies.libz-sys] -version = "1.0.18" - [build-dependencies.cc] version = "1.0" diff -Nru cargo-0.44.1/vendor/curl-sys/debian/patches/disable-libz-sys.patch cargo-0.47.0/vendor/curl-sys/debian/patches/disable-libz-sys.patch --- cargo-0.44.1/vendor/curl-sys/debian/patches/disable-libz-sys.patch 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/debian/patches/disable-libz-sys.patch 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,24 @@ +Index: curl-sys/Cargo.toml +=================================================================== +--- curl-sys.orig/Cargo.toml ++++ curl-sys/Cargo.toml +@@ -32,11 +32,6 @@ version = "0.2.2" + version = "0.1.3" + optional = true + +-[dependencies.libz-sys] +-version = "1.0.18" +-features = ["libc"] +-default-features = false +- + [build-dependencies.cc] + version = "1.0" + +@@ -52,7 +47,6 @@ spnego = [] + ssl = ["openssl-sys"] + static-curl = [] + static-ssl = ["openssl-sys"] +-zlib-ng-compat = ["libz-sys/zlib-ng", "static-curl"] + [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] + version = "0.9" + optional = true diff -Nru cargo-0.44.1/vendor/curl-sys/debian/patches/disable-mesalink.patch cargo-0.47.0/vendor/curl-sys/debian/patches/disable-mesalink.patch --- cargo-0.44.1/vendor/curl-sys/debian/patches/disable-mesalink.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/debian/patches/disable-mesalink.patch 2020-10-01 21:38:28.000000000 +0000 @@ -1,9 +1,11 @@ Description: for now, disable mesalink as it has some extra deps ---- a/Cargo.toml -+++ b/Cargo.toml -@@ -35,11 +35,6 @@ - [dependencies.libz-sys] - version = "1.0.18" +Index: curl-sys/Cargo.toml +=================================================================== +--- curl-sys.orig/Cargo.toml ++++ curl-sys/Cargo.toml +@@ -37,11 +37,6 @@ version = "1.0.18" + features = ["libc"] + default-features = false -[dependencies.mesalink] -version = "1.1.0-cratesio" diff -Nru cargo-0.44.1/vendor/curl-sys/debian/patches/disable-vendor.patch cargo-0.47.0/vendor/curl-sys/debian/patches/disable-vendor.patch --- cargo-0.44.1/vendor/curl-sys/debian/patches/disable-vendor.patch 2020-05-27 21:15:58.000000000 +0000 +++ 
cargo-0.47.0/vendor/curl-sys/debian/patches/disable-vendor.patch 2020-10-01 21:38:28.000000000 +0000 @@ -1,17 +1,21 @@ ---- a/Cargo.toml -+++ b/Cargo.toml -@@ -49,7 +49,7 @@ +Index: curl-sys/Cargo.toml +=================================================================== +--- curl-sys.orig/Cargo.toml ++++ curl-sys/Cargo.toml +@@ -51,7 +51,7 @@ protocol-ftp = [] spnego = [] ssl = ["openssl-sys"] static-curl = [] -static-ssl = ["openssl-sys/vendored"] +static-ssl = ["openssl-sys"] + zlib-ng-compat = ["libz-sys/zlib-ng", "static-curl"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] version = "0.9" - optional = true ---- a/build.rs -+++ b/build.rs -@@ -19,8 +19,9 @@ +Index: curl-sys/build.rs +=================================================================== +--- curl-sys.orig/build.rs ++++ curl-sys/build.rs +@@ -19,8 +19,9 @@ fn main() { return println!("cargo:rustc-flags=-l curl"); } diff -Nru cargo-0.44.1/vendor/curl-sys/debian/patches/series cargo-0.47.0/vendor/curl-sys/debian/patches/series --- cargo-0.44.1/vendor/curl-sys/debian/patches/series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/debian/patches/series 2020-10-01 21:38:28.000000000 +0000 @@ -1,2 +1,3 @@ disable-mesalink.patch disable-vendor.patch +disable-libz-sys.patch diff -Nru cargo-0.44.1/vendor/curl-sys/lib.rs cargo-0.47.0/vendor/curl-sys/lib.rs --- cargo-0.44.1/vendor/curl-sys/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -576,6 +576,11 @@ // pub const CURLOPT_LOGIN_OPTIONS: CURLoption = CURLOPTTYPE_OBJECTPOINT + 224; pub const CURLOPT_UNIX_SOCKET_PATH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 231; pub const CURLOPT_PIPEWAIT: CURLoption = CURLOPTTYPE_LONG + 237; +pub const CURLOPT_CONNECT_TO: CURLoption = CURLOPTTYPE_OBJECTPOINT + 243; +pub const CURLOPT_PROXY_CAINFO: CURLoption = CURLOPTTYPE_OBJECTPOINT + 246; +pub const CURLOPT_PROXY_CAPATH: CURLoption = CURLOPTTYPE_OBJECTPOINT + 247; +pub const CURLOPT_PROXY_SSLCERT: CURLoption = CURLOPTTYPE_OBJECTPOINT + 254; +pub const CURLOPT_PROXY_SSLKEY: CURLoption = CURLOPTTYPE_OBJECTPOINT + 256; pub const CURL_IPRESOLVE_WHATEVER: c_int = 0; pub const CURL_IPRESOLVE_V4: c_int = 1; @@ -602,6 +607,9 @@ /// Please use HTTP 2 without HTTP/1.1 Upgrade /// (Added in CURL 7.49.0) pub const CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE: c_int = 5; +/// Makes use of explicit HTTP/3 without fallback. +/// (Added in CURL 7.66.0) +pub const CURL_HTTP_VERSION_3: c_int = 30; // Note that the type here is wrong, it's just intended to just be an enum. 
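The new `CURL_HTTP_VERSION_3` constant backs the `HttpVersion::V3` variant added to the high-level crate earlier in this diff. A sketch of opting into it, assuming the crate's existing `http_version` setter and a placeholder URL; the linked libcurl must be built with an HTTP/3 backend, and there is no automatic downgrade:

```rust
use curl::easy::{Easy, HttpVersion};

fn main() -> Result<(), curl::Error> {
    let mut handle = Easy::new();
    handle.url("https://example.com/")?;

    // Ask libcurl to speak HTTP/3 directly; this fails instead of falling
    // back when the server (or the linked libcurl) cannot do HTTP/3.
    handle.http_version(HttpVersion::V3)?;

    handle.perform()
}
```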
pub const CURL_SSLVERSION_DEFAULT: CURLoption = 0; diff -Nru cargo-0.44.1/vendor/curl-sys/.pc/applied-patches cargo-0.47.0/vendor/curl-sys/.pc/applied-patches --- cargo-0.44.1/vendor/curl-sys/.pc/applied-patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/.pc/applied-patches 2020-10-01 21:38:28.000000000 +0000 @@ -1,2 +1,3 @@ disable-mesalink.patch disable-vendor.patch +disable-libz-sys.patch diff -Nru cargo-0.44.1/vendor/curl-sys/.pc/disable-libz-sys.patch/Cargo.toml cargo-0.47.0/vendor/curl-sys/.pc/disable-libz-sys.patch/Cargo.toml --- cargo-0.44.1/vendor/curl-sys/.pc/disable-libz-sys.patch/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/.pc/disable-libz-sys.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,68 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "curl-sys" +version = "0.4.36+curl-7.71.1" +authors = ["Alex Crichton "] +build = "build.rs" +links = "curl" +description = "Native bindings to the libcurl library" +documentation = "https://docs.rs/curl-sys" +categories = ["external-ffi-bindings"] +license = "MIT" +repository = "https://github.com/alexcrichton/curl-rust" + +[lib] +name = "curl_sys" +path = "lib.rs" +[dependencies.libc] +version = "0.2.2" + +[dependencies.libnghttp2-sys] +version = "0.1.3" +optional = true + +[dependencies.libz-sys] +version = "1.0.18" +features = ["libc"] +default-features = false + +[build-dependencies.cc] +version = "1.0" + +[build-dependencies.pkg-config] +version = "0.3.3" + +[features] +default = ["ssl"] +force-system-lib-on-osx = [] +http2 = ["libnghttp2-sys"] +protocol-ftp = [] +spnego = [] +ssl = ["openssl-sys"] +static-curl = [] +static-ssl = ["openssl-sys"] +zlib-ng-compat = ["libz-sys/zlib-ng", "static-curl"] +[target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] +version = "0.9" +optional = true +[target."cfg(target_env = \"msvc\")".build-dependencies.vcpkg] +version = "0.2" +[target."cfg(windows)".dependencies.winapi] +version = "0.3" +features = ["winsock2", "ws2def"] +[badges.appveyor] +repository = "alexcrichton/curl-rust" + +[badges.travis-ci] +repository = "alexcrichton/curl-rust" diff -Nru cargo-0.44.1/vendor/curl-sys/.pc/disable-mesalink.patch/Cargo.toml cargo-0.47.0/vendor/curl-sys/.pc/disable-mesalink.patch/Cargo.toml --- cargo-0.44.1/vendor/curl-sys/.pc/disable-mesalink.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/.pc/disable-mesalink.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "curl-sys" -version = "0.4.31+curl-7.70.0" +version = "0.4.36+curl-7.71.1" authors = ["Alex Crichton "] build = "build.rs" links = "curl" @@ -34,6 +34,8 @@ [dependencies.libz-sys] version = "1.0.18" +features = ["libc"] +default-features = false [dependencies.mesalink] version = "1.1.0-cratesio" @@ -55,6 +57,7 @@ ssl = ["openssl-sys"] static-curl = [] static-ssl = ["openssl-sys/vendored"] +zlib-ng-compat = ["libz-sys/zlib-ng", "static-curl"] [target."cfg(all(unix, 
not(target_os = \"macos\")))".dependencies.openssl-sys] version = "0.9" optional = true diff -Nru cargo-0.44.1/vendor/curl-sys/.pc/disable-vendor.patch/build.rs cargo-0.47.0/vendor/curl-sys/.pc/disable-vendor.patch/build.rs --- cargo-0.44.1/vendor/curl-sys/.pc/disable-vendor.patch/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/.pc/disable-vendor.patch/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -147,6 +147,7 @@ .file("curl/lib/curl_threads.c") .file("curl/lib/dotdot.c") .file("curl/lib/doh.c") + .file("curl/lib/dynbuf.c") .file("curl/lib/easy.c") .file("curl/lib/escape.c") .file("curl/lib/file.c") @@ -195,11 +196,13 @@ .file("curl/lib/url.c") .file("curl/lib/urlapi.c") .file("curl/lib/version.c") + .file("curl/lib/vtls/keylog.c") .file("curl/lib/vtls/vtls.c") .file("curl/lib/warnless.c") .file("curl/lib/wildcard.c") .define("HAVE_GETADDRINFO", None) .define("HAVE_GETPEERNAME", None) + .define("HAVE_GETSOCKNAME", None) .warnings(false); if cfg!(feature = "protocol-ftp") { @@ -233,6 +236,11 @@ .file("curl/lib/vauth/vauth.c"); } + if !windows { + cfg.define("USE_UNIX_SOCKETS", None) + .define("HAVE_SYS_UN_H", None); + } + // Configure TLS backend. Since Cargo does not support mutually exclusive // features, make sure we only compile one vtls. if cfg!(feature = "mesalink") { @@ -283,7 +291,8 @@ .define("USE_THREADS_WIN32", None) .define("HAVE_IOCTLSOCKET_FIONBIO", None) .define("USE_WINSOCK", None) - .file("curl/lib/system_win32.c"); + .file("curl/lib/system_win32.c") + .file("curl/lib/curl_multibyte.c"); if cfg!(feature = "spnego") { cfg.file("curl/lib/vauth/spnego_sspi.c"); diff -Nru cargo-0.44.1/vendor/curl-sys/.pc/disable-vendor.patch/Cargo.toml cargo-0.47.0/vendor/curl-sys/.pc/disable-vendor.patch/Cargo.toml --- cargo-0.44.1/vendor/curl-sys/.pc/disable-vendor.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/curl-sys/.pc/disable-vendor.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "curl-sys" -version = "0.4.31+curl-7.70.0" +version = "0.4.36+curl-7.71.1" authors = ["Alex Crichton "] build = "build.rs" links = "curl" @@ -34,6 +34,8 @@ [dependencies.libz-sys] version = "1.0.18" +features = ["libc"] +default-features = false [build-dependencies.cc] version = "1.0" @@ -50,6 +52,7 @@ ssl = ["openssl-sys"] static-curl = [] static-ssl = ["openssl-sys/vendored"] +zlib-ng-compat = ["libz-sys/zlib-ng", "static-curl"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-sys] version = "0.9" optional = true diff -Nru cargo-0.44.1/vendor/filetime/.cargo-checksum.json cargo-0.47.0/vendor/filetime/.cargo-checksum.json --- cargo-0.44.1/vendor/filetime/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/filetime/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"affc17579b132fc2461adf7c575cc6e8b134ebca52c51f5411388965227dc695"} \ No newline at end of file +{"files":{},"package":"3ed85775dcc68644b5c950ac06a2b23768d3bc9390464151aaf27136998dcf9e"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/filetime/Cargo.toml cargo-0.47.0/vendor/filetime/Cargo.toml --- cargo-0.44.1/vendor/filetime/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/filetime/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "filetime" -version = "0.2.10" +version = "0.2.12" authors = ["Alex Crichton "] description = "Platform-agnostic accessors of timestamps in File 
metadata\n" homepage = "https://github.com/alexcrichton/filetime" diff -Nru cargo-0.44.1/vendor/filetime/src/redox.rs cargo-0.47.0/vendor/filetime/src/redox.rs --- cargo-0.44.1/vendor/filetime/src/redox.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/filetime/src/redox.rs 2020-10-01 21:38:28.000000000 +0000 @@ -7,29 +7,75 @@ pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { let fd = syscall::open(p.as_os_str().as_bytes(), 0) .map_err(|err| io::Error::from_raw_os_error(err.errno))?; - set_file_times_redox(fd, atime, mtime) -} - -pub fn set_file_mtime(_p: &Path, _mtime: FileTime) -> io::Result<()> { - unimplemented!() + let res = set_file_times_redox(fd, atime, mtime); + let _ = syscall::close(fd); + res } -pub fn set_file_atime(_p: &Path, _atime: FileTime) -> io::Result<()> { - unimplemented!() +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + let fd = syscall::open(p.as_os_str().as_bytes(), 0) + .map_err(|err| io::Error::from_raw_os_error(err.errno))?; + let mut st = syscall::Stat::default(); + let res = match syscall::fstat(fd, &mut st) { + Err(err) => Err(io::Error::from_raw_os_error(err.errno)), + Ok(_) => set_file_times_redox( + fd, + FileTime { + seconds: st.st_atime as i64, + nanos: st.st_atime_nsec as u32, + }, + mtime, + ), + }; + let _ = syscall::close(fd); + res } -pub fn set_file_handle_times( - _f: &File, - _atime: Option, - _mtime: Option, -) -> io::Result<()> { - unimplemented!() +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + let fd = syscall::open(p.as_os_str().as_bytes(), 0) + .map_err(|err| io::Error::from_raw_os_error(err.errno))?; + let mut st = syscall::Stat::default(); + let res = match syscall::fstat(fd, &mut st) { + Err(err) => Err(io::Error::from_raw_os_error(err.errno)), + Ok(_) => set_file_times_redox( + fd, + atime, + FileTime { + seconds: st.st_mtime as i64, + nanos: st.st_mtime_nsec as u32, + }, + ), + }; + let _ = syscall::close(fd); + res } pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { let fd = syscall::open(p.as_os_str().as_bytes(), syscall::O_NOFOLLOW) .map_err(|err| io::Error::from_raw_os_error(err.errno))?; - set_file_times_redox(fd, atime, mtime) + let res = set_file_times_redox(fd, atime, mtime); + let _ = syscall::close(fd); + res +} + +pub fn set_file_handle_times( + f: &File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let (atime1, mtime1) = match (atime, mtime) { + (Some(a), Some(b)) => (a, b), + (None, None) => return Ok(()), + (Some(a), None) => { + let meta = f.metadata()?; + (a, FileTime::from_last_modification_time(&meta)) + } + (None, Some(b)) => { + let meta = f.metadata()?; + (FileTime::from_last_access_time(&meta), b) + } + }; + set_file_times_redox(f.as_raw_fd() as usize, atime1, mtime1) } fn set_file_times_redox(fd: usize, atime: FileTime, mtime: FileTime) -> io::Result<()> { @@ -43,9 +89,7 @@ } let times = [to_timespec(&atime), to_timespec(&mtime)]; - let res = syscall::futimens(fd, ×); - let _ = syscall::close(fd); - match res { + match syscall::futimens(fd, ×) { Ok(_) => Ok(()), Err(err) => Err(io::Error::from_raw_os_error(err.errno)), } diff -Nru cargo-0.44.1/vendor/filetime/src/unix/android.rs cargo-0.47.0/vendor/filetime/src/unix/android.rs --- cargo-0.44.1/vendor/filetime/src/unix/android.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/filetime/src/unix/android.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,63 @@ +use crate::FileTime; +use 
std::ffi::CString; +use std::fs::File; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + set_times(p, None, Some(mtime), false) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), None, false) +} + +pub fn set_file_handle_times( + f: &File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + + // On Android NDK before version 19, `futimens` is not available. + // + // For better compatibility, we reimplement `futimens` using `utimensat`, + // the same way as bionic libc uses it to implement `futimens`. + let rc = unsafe { libc::utimensat(f.as_raw_fd(), core::ptr::null(), times.as_ptr(), 0) }; + if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + +fn set_times( + p: &Path, + atime: Option, + mtime: Option, + symlink: bool, +) -> io::Result<()> { + let flags = if symlink { + libc::AT_SYMLINK_NOFOLLOW + } else { + 0 + }; + + let p = CString::new(p.as_os_str().as_bytes())?; + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { libc::utimensat(libc::AT_FDCWD, p.as_ptr(), times.as_ptr(), flags) }; + if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} diff -Nru cargo-0.44.1/vendor/filetime/src/unix/mod.rs cargo-0.47.0/vendor/filetime/src/unix/mod.rs --- cargo-0.44.1/vendor/filetime/src/unix/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/filetime/src/unix/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,12 +8,14 @@ mod utimes; mod linux; pub use self::linux::*; + } else if #[cfg(target_os = "android")] { + mod android; + pub use self::android::*; } else if #[cfg(target_os = "macos")] { mod utimes; mod macos; pub use self::macos::*; - } else if #[cfg(any(target_os = "android", - target_os = "solaris", + } else if #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "emscripten", target_os = "freebsd", @@ -34,6 +36,9 @@ if #[cfg(target_os = "macos")] { // https://github.com/apple/darwin-xnu/blob/a449c6a3b8014d9406c2ddbdc81795da24aa7443/bsd/sys/stat.h#L541 const UTIME_OMIT: i64 = -2; + } else if #[cfg(target_os = "openbsd")] { + // https://github.com/openbsd/src/blob/master/sys/sys/stat.h#L189 + const UTIME_OMIT: i64 = -1; } else { const UTIME_OMIT: i64 = 1_073_741_822; } diff -Nru cargo-0.44.1/vendor/flate2/.cargo-checksum.json cargo-0.47.0/vendor/flate2/.cargo-checksum.json --- cargo-0.44.1/vendor/flate2/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42"} \ No newline at end of file +{"files":{},"package":"da80be589a72651dcda34d8b35bcdc9b7254ad06325611074d9cc0fbb19f60ee"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/flate2/Cargo.lock cargo-0.47.0/vendor/flate2/Cargo.lock --- cargo-0.44.1/vendor/flate2/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -1,10 +1,10 @@ # This file is automatically 
@generated by Cargo. # It is not intended for manual editing. [[package]] -name = "adler32" -version = "1.0.4" +name = "adler" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" [[package]] name = "autocfg" @@ -36,9 +36,9 @@ [[package]] name = "cc" -version = "1.0.50" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" [[package]] name = "cfg-if" @@ -65,6 +65,15 @@ ] [[package]] +name = "cmake" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56268c17a6248366d66d4a47a3381369d068cce8409bb1716ed77ea32163bb" +dependencies = [ + "cc", +] + +[[package]] name = "crc32fast" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -101,12 +110,13 @@ [[package]] name = "crossbeam-queue" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ "cfg-if", "crossbeam-utils", + "maybe-uninit", ] [[package]] @@ -122,7 +132,7 @@ [[package]] name = "flate2" -version = "1.0.14" +version = "1.0.18" dependencies = [ "cfg-if", "cloudflare-zlib-sys", @@ -141,9 +151,9 @@ [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fuchsia-zircon" @@ -180,9 +190,9 @@ [[package]] name = "hermit-abi" -version = "0.1.8" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" dependencies = [ "libc", ] @@ -214,17 +224,18 @@ [[package]] name = "libc" -version = "0.2.67" +version = "0.2.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" +checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10" [[package]] name = "libz-sys" -version = "1.0.25" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", + "cmake", "libc", "pkg-config", "vcpkg", @@ -232,18 +243,18 @@ [[package]] name = "lock_api" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ "scopeguard", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = 
"4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if", ] @@ -256,9 +267,9 @@ [[package]] name = "memoffset" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" dependencies = [ "autocfg", ] @@ -275,18 +286,18 @@ [[package]] name = "miniz_oxide" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" dependencies = [ - "adler32", + "adler", ] [[package]] name = "mio" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ "cfg-if", "fuchsia-zircon", @@ -315,20 +326,20 @@ [[package]] name = "net2" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", "libc", @@ -357,20 +368,20 @@ "redox_syscall", "rustc_version", "smallvec", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "pkg-config" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "ppv-lite86" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "quickcheck" @@ -425,9 +436,9 @@ [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "rustc_version" @@ -557,9 +568,9 @@ [[package]] name = "vcpkg" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" [[package]] name = "wasi" @@ -575,9 +586,9 @@ [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = 
"5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", diff -Nru cargo-0.44.1/vendor/flate2/Cargo.toml cargo-0.47.0/vendor/flate2/Cargo.toml --- cargo-0.44.1/vendor/flate2/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,16 +13,16 @@ [package] edition = "2018" name = "flate2" -version = "1.0.14" -authors = ["Alex Crichton "] -description = "Bindings to miniz.c for DEFLATE compression and decompression exposed as\nReader/Writer streams. Contains bindings for zlib, deflate, and gzip-based\nstreams.\n" -homepage = "https://github.com/alexcrichton/flate2-rs" +version = "1.0.18" +authors = ["Alex Crichton ", "Josh Triplett "] +description = "DEFLATE compression and decompression exposed as Read/BufRead/Write streams.\nSupports miniz_oxide, miniz.c, and multiple zlib implementations. Supports\nzlib, gzip, and raw deflate streams.\n" +homepage = "https://github.com/rust-lang/flate2-rs" documentation = "https://docs.rs/flate2" readme = "README.md" -keywords = ["gzip", "flate", "zlib", "encoding"] +keywords = ["gzip", "deflate", "zlib", "zlib-ng", "encoding"] categories = ["compression", "api-bindings"] license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/flate2-rs" +repository = "https://github.com/rust-lang/flate2-rs" [dependencies.cfg-if] version = "0.1.6" @@ -41,12 +41,14 @@ version = "0.2.65" [dependencies.libz-sys] -version = "1.0.25" +version = "1.1.0" optional = true +default-features = false [dependencies.miniz_oxide] -version = "0.3.5" +version = "0.4.0" optional = true +default-features = false [dependencies.tokio-io] version = "0.1.11" @@ -78,10 +80,7 @@ miniz-sys = ["zlib"] tokio = ["tokio-io", "futures"] zlib = ["any_zlib", "libz-sys"] +zlib-ng-compat = ["zlib", "libz-sys/zlib-ng"] [target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.miniz_oxide] -version = "0.3.5" -[badges.appveyor] -repository = "alexcrichton/flate2-rs" - -[badges.travis-ci] -repository = "alexcrichton/flate2-rs" +version = "0.4.0" +default-features = false diff -Nru cargo-0.44.1/vendor/flate2/.pc/disable-miniz.patch/Cargo.toml cargo-0.47.0/vendor/flate2/.pc/disable-miniz.patch/Cargo.toml --- cargo-0.44.1/vendor/flate2/.pc/disable-miniz.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/.pc/disable-miniz.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,16 +13,16 @@ [package] edition = "2018" name = "flate2" -version = "1.0.14" -authors = ["Alex Crichton "] -description = "Bindings to miniz.c for DEFLATE compression and decompression exposed as\nReader/Writer streams. Contains bindings for zlib, deflate, and gzip-based\nstreams.\n" -homepage = "https://github.com/alexcrichton/flate2-rs" +version = "1.0.18" +authors = ["Alex Crichton ", "Josh Triplett "] +description = "DEFLATE compression and decompression exposed as Read/BufRead/Write streams.\nSupports miniz_oxide, miniz.c, and multiple zlib implementations. 
Supports\nzlib, gzip, and raw deflate streams.\n" +homepage = "https://github.com/rust-lang/flate2-rs" documentation = "https://docs.rs/flate2" readme = "README.md" -keywords = ["gzip", "flate", "zlib", "encoding"] +keywords = ["gzip", "deflate", "zlib", "zlib-ng", "encoding"] categories = ["compression", "api-bindings"] license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/flate2-rs" +repository = "https://github.com/rust-lang/flate2-rs" [dependencies.cfg-if] version = "0.1.6" @@ -41,16 +41,18 @@ version = "0.2.65" [dependencies.libz-sys] -version = "1.0.25" +version = "1.1.0" optional = true +default-features = false [dependencies.miniz-sys] version = "0.1.11" optional = true [dependencies.miniz_oxide] -version = "0.3.5" +version = "0.4.0" optional = true +default-features = false [dependencies.tokio-io] version = "0.1.11" @@ -81,10 +83,7 @@ rust_backend = ["miniz_oxide"] tokio = ["tokio-io", "futures"] zlib = ["any_zlib", "libz-sys"] +zlib-ng-compat = ["zlib", "libz-sys/zlib-ng"] [target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.miniz_oxide] -version = "0.3.5" -[badges.appveyor] -repository = "alexcrichton/flate2-rs" - -[badges.travis-ci] -repository = "alexcrichton/flate2-rs" +version = "0.4.0" +default-features = false diff -Nru cargo-0.44.1/vendor/flate2/README.md cargo-0.47.0/vendor/flate2/README.md --- cargo-0.44.1/vendor/flate2/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -5,9 +5,9 @@ A streaming compression/decompression library DEFLATE-based streams in Rust. -This crate by default implemented as a wrapper around the `miniz_oxide` crate, a -port of `miniz.c` to Rust. This crate can also optionally use other [backends](#Backends) like the zlib library -or `miniz.c` itself. +This crate by default uses the `miniz_oxide` crate, a port of `miniz.c` to pure +Rust. This crate also supports other [backends](#Backends), such as the widely +available zlib library or the high-performance zlib-ng library. Supported formats: @@ -52,29 +52,40 @@ ## Backends -Using zlib instead of the (default) Rust backend: +The default `miniz_oxide` backend has the advantage of being pure Rust, but it +has relatively low performance. For higher performance, you can use zlib +instead: ```toml [dependencies] -flate2 = { version = "1.0", features = ["zlib"], default-features = false } +flate2 = { version = "1.0.17", features = ["zlib"], default-features = false } ``` -The cloudflare optimized version of zlib is also available. -While it's significantly faster it requires a x86-64 CPU with SSE 4.2 or ARM64 with NEON & CRC. -It does not support 32-bit CPUs at all and is incompatible with mingw. -For more information check the [crate documentation](https://crates.io/crates/cloudflare-zlib-sys). +This supports either the high-performance zlib-ng backend (in zlib-compat mode) +or the use of a shared system zlib library. To explicitly opt into the fast +zlib-ng backend, use: ```toml [dependencies] -flate2 = { version = "1.0", features = ["cloudflare_zlib"], default-features = false } +flate2 = { version = "1.0.17", features = ["zlib-ng-compat"], default-features = false } ``` -Using `miniz.c`: +Note that if any crate in your dependency graph explicitly requests stock zlib, +or uses libz-sys directly without `default-features = false`, you'll get stock +zlib rather than zlib-ng. See [the libz-sys +README](https://github.com/rust-lang/libz-sys/blob/main/README.md) for details. 
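Whichever backend the `Cargo.toml` features select, the `flate2` API that callers see is unchanged; as a rough usage sketch (illustrative only, not part of this diff), a gzip round trip looks like:

```rust
use std::io::{Read, Write};

use flate2::read::GzDecoder;
use flate2::write::GzEncoder;
use flate2::Compression;

fn main() -> std::io::Result<()> {
    // Compress a small buffer. The selected backend (miniz_oxide, zlib,
    // zlib-ng-compat, ...) only changes how the compressed bytes are produced.
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(b"backend-agnostic data")?;
    let compressed = encoder.finish()?;

    // Decompress again and verify the round trip.
    let mut decoder = GzDecoder::new(&compressed[..]);
    let mut out = String::new();
    decoder.read_to_string(&mut out)?;
    assert_eq!(out, "backend-agnostic data");
    Ok(())
}
```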
+ +For compatibility with previous versions of `flate2`, the cloudflare optimized +version of zlib is available, via the `cloudflare_zlib` feature. It's not as +fast as zlib-ng, but it's faster than stock zlib. It requires a x86-64 CPU with +SSE 4.2 or ARM64 with NEON & CRC. It does not support 32-bit CPUs at all and is +incompatible with mingw. For more information check the [crate +documentation](https://crates.io/crates/cloudflare-zlib-sys). Note that +`cloudflare_zlib` will cause breakage if any other crate in your crate graph +uses another version of zlib/libz. -```toml -[dependencies] -flate2 = { version = "1.0", features = ["miniz-sys"], default-features = false } -``` +For compatibility with previous versions of `flate2`, the C version of `miniz.c` +is still available, using the feature `miniz-sys`. # License diff -Nru cargo-0.44.1/vendor/flate2/src/ffi/c.rs cargo-0.47.0/vendor/flate2/src/ffi/c.rs --- cargo-0.44.1/vendor/flate2/src/ffi/c.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/src/ffi/c.rs 2020-10-01 21:38:28.000000000 +0000 @@ -273,7 +273,7 @@ } else { -(window_bits as c_int) }, - 9, + 8, MZ_DEFAULT_STRATEGY, ); assert_eq!(ret, 0); @@ -345,7 +345,10 @@ } /// Zlib specific -#[cfg(all(feature = "zlib", not(feature = "cloudflare_zlib")))] +#[cfg(any( + feature = "zlib-ng-compat", + all(feature = "zlib", not(feature = "cloudflare_zlib")) +))] #[allow(bad_style)] mod c_backend { use libc::{c_char, c_int}; @@ -409,7 +412,7 @@ } /// Cloudflare optimized Zlib specific -#[cfg(feature = "cloudflare_zlib")] +#[cfg(all(feature = "cloudflare_zlib", not(feature = "zlib-ng-compat")))] #[allow(bad_style)] mod c_backend { use libc::{c_char, c_int}; diff -Nru cargo-0.44.1/vendor/flate2/src/ffi/mod.rs cargo-0.47.0/vendor/flate2/src/ffi/mod.rs --- cargo-0.44.1/vendor/flate2/src/ffi/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/flate2/src/ffi/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -36,7 +36,7 @@ // Default to Rust implementation unless explicitly opted in to a different backend. cfg_if::cfg_if! { - if #[cfg(any(feature = "miniz-sys", feature = "zlib", feature = "cloudflare_zlib"))] { + if #[cfg(any(feature = "miniz-sys", feature = "any_zlib"))] { mod c; pub use self::c::*; } else { diff -Nru cargo-0.44.1/vendor/fs2/.cargo-checksum.json cargo-0.47.0/vendor/fs2/.cargo-checksum.json --- cargo-0.44.1/vendor/fs2/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{},"package":"9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/fs2/Cargo.toml cargo-0.47.0/vendor/fs2/Cargo.toml --- cargo-0.44.1/vendor/fs2/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "fs2" -version = "0.4.3" -authors = ["Dan Burkert "] -description = "Cross-platform file locks and file duplication." -documentation = "https://docs.rs/fs2" -keywords = ["file", "file-system", "lock", "duplicate", "flock"] -license = "MIT/Apache-2.0" -repository = "https://github.com/danburkert/fs2-rs" -[dev-dependencies.tempdir] -version = "0.3" -[target."cfg(unix)".dependencies.libc] -version = "0.2.30" -[target."cfg(windows)".dependencies.winapi] -version = "0.3" -features = ["handleapi", "processthreadsapi", "winerror", "fileapi", "winbase", "std"] -[badges.appveyor] -repository = "danburkert/fs2-rs" - -[badges.travis-ci] -repository = "danburkert/fs2-rs" diff -Nru cargo-0.44.1/vendor/fs2/LICENSE-APACHE cargo-0.47.0/vendor/fs2/LICENSE-APACHE --- cargo-0.44.1/vendor/fs2/LICENSE-APACHE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.44.1/vendor/fs2/LICENSE-MIT cargo-0.47.0/vendor/fs2/LICENSE-MIT --- cargo-0.44.1/vendor/fs2/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2015 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/fs2/README.md cargo-0.47.0/vendor/fs2/README.md --- cargo-0.44.1/vendor/fs2/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -# fs2 - -Extended utilities for working with files and filesystems in Rust. `fs2` -requires Rust stable 1.8 or greater. - -[![Build Status](https://travis-ci.org/danburkert/fs2-rs.svg?branch=master)](https://travis-ci.org/danburkert/fs2-rs) -[![Windows Build status](https://ci.appveyor.com/api/projects/status/iuvjv1aaaml0rntt/branch/master?svg=true)](https://ci.appveyor.com/project/danburkert/fs2-rs/branch/master) -[![Documentation](https://docs.rs/fs2/badge.svg)](https://docs.rs/memmap) -[![Crate](https://img.shields.io/crates/v/fs2.svg)](https://crates.io/crates/memmap) - -## Features - -- [x] file descriptor duplication. -- [x] file locks. -- [x] file (pre)allocation. -- [x] file allocation information. -- [x] filesystem space usage information. - -## Platforms - -`fs2` should work on any platform supported by -[`libc`](https://github.com/rust-lang-nursery/libc#platforms-and-documentation). - -`fs2` is continuously tested on: - * `x86_64-unknown-linux-gnu` (Linux) - * `i686-unknown-linux-gnu` - * `x86_64-apple-darwin` (OSX) - * `i686-apple-darwin` - * `x86_64-pc-windows-msvc` (Windows) - * `i686-pc-windows-msvc` - * `x86_64-pc-windows-gnu` - * `i686-pc-windows-gnu` - -## Benchmarks - -Simple benchmarks are provided for the methods provided. Many of these -benchmarks use files in a temporary directory. On many modern Linux distros the -default temporary directory, `/tmp`, is mounted on a tempfs filesystem, which -will have different performance characteristics than a disk-backed filesystem. -The temporary directory is configurable at runtime through the environment (see -[`env::temp_dir`](https://doc.rust-lang.org/stable/std/env/fn.temp_dir.html)). - -## License - -`fs2` is primarily distributed under the terms of both the MIT license and the -Apache License (Version 2.0). - -See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT) for details. - -Copyright (c) 2015 Dan Burkert. diff -Nru cargo-0.44.1/vendor/fs2/src/lib.rs cargo-0.47.0/vendor/fs2/src/lib.rs --- cargo-0.44.1/vendor/fs2/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,458 +0,0 @@ -//! Extended utilities for working with files and filesystems in Rust. - -#![doc(html_root_url = "https://docs.rs/fs2/0.4.3")] - -#![cfg_attr(test, feature(test))] - -#[cfg(windows)] -extern crate winapi; - -#[cfg(unix)] -mod unix; -#[cfg(unix)] -use unix as sys; - -#[cfg(windows)] -mod windows; -#[cfg(windows)] -use windows as sys; - -use std::fs::File; -use std::io::{Error, Result}; -use std::path::Path; - -/// Extension trait for `std::fs::File` which provides allocation, duplication and locking methods. -/// -/// ## Notes on File Locks -/// -/// This library provides whole-file locks in both shared (read) and exclusive -/// (read-write) varieties. 
-/// -/// File locks are a cross-platform hazard since the file lock APIs exposed by -/// operating system kernels vary in subtle and not-so-subtle ways. -/// -/// The API exposed by this library can be safely used across platforms as long -/// as the following rules are followed: -/// -/// * Multiple locks should not be created on an individual `File` instance -/// concurrently. -/// * Duplicated files should not be locked without great care. -/// * Files to be locked should be opened with at least read or write -/// permissions. -/// * File locks may only be relied upon to be advisory. -/// -/// See the tests in `lib.rs` for cross-platform lock behavior that may be -/// relied upon; see the tests in `unix.rs` and `windows.rs` for examples of -/// platform-specific behavior. File locks are implemented with -/// [`flock(2)`](http://man7.org/linux/man-pages/man2/flock.2.html) on Unix and -/// [`LockFile`](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365202(v=vs.85).aspx) -/// on Windows. -pub trait FileExt { - - /// Returns a duplicate instance of the file. - /// - /// The returned file will share the same file position as the original - /// file. - /// - /// If using rustc version 1.9 or later, prefer using `File::try_clone` to this. - /// - /// # Notes - /// - /// This is implemented with - /// [`dup(2)`](http://man7.org/linux/man-pages/man2/dup.2.html) on Unix and - /// [`DuplicateHandle`](https://msdn.microsoft.com/en-us/library/windows/desktop/ms724251(v=vs.85).aspx) - /// on Windows. - fn duplicate(&self) -> Result; - - /// Returns the amount of physical space allocated for a file. - fn allocated_size(&self) -> Result; - - /// Ensures that at least `len` bytes of disk space are allocated for the - /// file, and the file size is at least `len` bytes. After a successful call - /// to `allocate`, subsequent writes to the file within the specified length - /// are guaranteed not to fail because of lack of disk space. - fn allocate(&self, len: u64) -> Result<()>; - - /// Locks the file for shared usage, blocking if the file is currently - /// locked exclusively. - fn lock_shared(&self) -> Result<()>; - - /// Locks the file for exclusive usage, blocking if the file is currently - /// locked. - fn lock_exclusive(&self) -> Result<()>; - - /// Locks the file for shared usage, or returns a an error if the file is - /// currently locked (see `lock_contended_error`). - fn try_lock_shared(&self) -> Result<()>; - - /// Locks the file for shared usage, or returns a an error if the file is - /// currently locked (see `lock_contended_error`). - fn try_lock_exclusive(&self) -> Result<()>; - - /// Unlocks the file. - fn unlock(&self) -> Result<()>; -} - -impl FileExt for File { - fn duplicate(&self) -> Result { - sys::duplicate(self) - } - fn allocated_size(&self) -> Result { - sys::allocated_size(self) - } - fn allocate(&self, len: u64) -> Result<()> { - sys::allocate(self, len) - } - fn lock_shared(&self) -> Result<()> { - sys::lock_shared(self) - } - fn lock_exclusive(&self) -> Result<()> { - sys::lock_exclusive(self) - } - fn try_lock_shared(&self) -> Result<()> { - sys::try_lock_shared(self) - } - fn try_lock_exclusive(&self) -> Result<()> { - sys::try_lock_exclusive(self) - } - fn unlock(&self) -> Result<()> { - sys::unlock(self) - } -} - -/// Returns the error that a call to a try lock method on a contended file will -/// return. -pub fn lock_contended_error() -> Error { - sys::lock_error() -} - -/// `FsStats` contains some common stats about a file system. 
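Since this hunk deletes the whole `fs2` crate from the vendor tree, a minimal sketch of how the `FileExt` locking methods above were typically used may help; this is illustrative only (hypothetical lock-file path), not code taken from cargo itself:

```rust
use std::fs::OpenOptions;

use fs2::FileExt;

fn main() -> std::io::Result<()> {
    // Locking needs at least read or write permission on the file.
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open("/tmp/example.lock")?; // hypothetical path

    // Block until an exclusive advisory lock is acquired.
    file.lock_exclusive()?;

    // ... critical section ...

    // Release explicitly; closing the file also releases the lock.
    file.unlock()?;
    Ok(())
}
```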
-#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct FsStats { - free_space: u64, - available_space: u64, - total_space: u64, - allocation_granularity: u64, -} - -impl FsStats { - /// Returns the number of free bytes in the file system containing the provided - /// path. - pub fn free_space(&self) -> u64 { - self.free_space - } - - /// Returns the available space in bytes to non-priveleged users in the file - /// system containing the provided path. - pub fn available_space(&self) -> u64 { - self.available_space - } - - /// Returns the total space in bytes in the file system containing the provided - /// path. - pub fn total_space(&self) -> u64 { - self.total_space - } - - /// Returns the filesystem's disk space allocation granularity in bytes. - /// The provided path may be for any file in the filesystem. - /// - /// On Posix, this is equivalent to the filesystem's block size. - /// On Windows, this is equivalent to the filesystem's cluster size. - pub fn allocation_granularity(&self) -> u64 { - self.allocation_granularity - } -} - -/// Get the stats of the file system containing the provided path. -pub fn statvfs

<P>(path: P) -> Result<FsStats> where P: AsRef<Path> { - sys::statvfs(path.as_ref()) } - -/// Returns the number of free bytes in the file system containing the provided -/// path. -pub fn free_space

<P>(path: P) -> Result<u64> where P: AsRef<Path> { - statvfs(path).map(|stat| stat.free_space) } - -/// Returns the available space in bytes to non-priveleged users in the file -/// system containing the provided path. -pub fn available_space

<P>(path: P) -> Result<u64> where P: AsRef<Path> { - statvfs(path).map(|stat| stat.available_space) } - -/// Returns the total space in bytes in the file system containing the provided -/// path. -pub fn total_space

<P>(path: P) -> Result<u64> where P: AsRef<Path> { - statvfs(path).map(|stat| stat.total_space) } - -/// Returns the filesystem's disk space allocation granularity in bytes. -/// The provided path may be for any file in the filesystem. -/// -/// On Posix, this is equivalent to the filesystem's block size. -/// On Windows, this is equivalent to the filesystem's cluster size. -pub fn allocation_granularity

(path: P) -> Result where P: AsRef { - statvfs(path).map(|stat| stat.allocation_granularity) -} - -#[cfg(test)] -mod test { - - extern crate tempdir; - extern crate test; - - use std::fs; - use super::*; - use std::io::{Read, Seek, SeekFrom, Write}; - - /// Tests file duplication. - #[test] - fn duplicate() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let mut file1 = - fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let mut file2 = file1.duplicate().unwrap(); - - // Write into the first file and then drop it. - file1.write_all(b"foo").unwrap(); - drop(file1); - - let mut buf = vec![]; - - // Read from the second file; since the position is shared it will already be at EOF. - file2.read_to_end(&mut buf).unwrap(); - assert_eq!(0, buf.len()); - - // Rewind and read. - file2.seek(SeekFrom::Start(0)).unwrap(); - file2.read_to_end(&mut buf).unwrap(); - assert_eq!(&buf, &b"foo"); - } - - /// Tests shared file lock operations. - #[test] - fn lock_shared() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file3 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Concurrent shared access is OK, but not shared and exclusive. - file1.lock_shared().unwrap(); - file2.lock_shared().unwrap(); - assert_eq!(file3.try_lock_exclusive().unwrap_err().kind(), - lock_contended_error().kind()); - file1.unlock().unwrap(); - assert_eq!(file3.try_lock_exclusive().unwrap_err().kind(), - lock_contended_error().kind()); - - // Once all shared file locks are dropped, an exclusive lock may be created; - file2.unlock().unwrap(); - file3.lock_exclusive().unwrap(); - } - - /// Tests exclusive file lock operations. - #[test] - fn lock_exclusive() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // No other access is possible once an exclusive lock is created. - file1.lock_exclusive().unwrap(); - assert_eq!(file2.try_lock_exclusive().unwrap_err().kind(), - lock_contended_error().kind()); - assert_eq!(file2.try_lock_shared().unwrap_err().kind(), - lock_contended_error().kind()); - - // Once the exclusive lock is dropped, the second file is able to create a lock. - file1.unlock().unwrap(); - file2.lock_exclusive().unwrap(); - } - - /// Tests that a lock is released after the file that owns it is dropped. - #[test] - fn lock_cleanup() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - file1.lock_exclusive().unwrap(); - assert_eq!(file2.try_lock_shared().unwrap_err().kind(), - lock_contended_error().kind()); - - // Drop file1; the lock should be released. - drop(file1); - file2.lock_shared().unwrap(); - } - - /// Tests file allocation. 
- #[test] - fn allocate() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let blksize = allocation_granularity(&path).unwrap(); - - // New files are created with no allocated size. - assert_eq!(0, file.allocated_size().unwrap()); - assert_eq!(0, file.metadata().unwrap().len()); - - // Allocate space for the file, checking that the allocated size steps - // up by block size, and the file length matches the allocated size. - - file.allocate(2 * blksize - 1).unwrap(); - assert_eq!(2 * blksize, file.allocated_size().unwrap()); - assert_eq!(2 * blksize - 1, file.metadata().unwrap().len()); - - // Truncate the file, checking that the allocated size steps down by - // block size. - - file.set_len(blksize + 1).unwrap(); - assert_eq!(2 * blksize, file.allocated_size().unwrap()); - assert_eq!(blksize + 1, file.metadata().unwrap().len()); - } - - /// Checks filesystem space methods. - #[test] - fn filesystem_space() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let total_space = total_space(&tempdir.path()).unwrap(); - let free_space = free_space(&tempdir.path()).unwrap(); - let available_space = available_space(&tempdir.path()).unwrap(); - - assert!(total_space > free_space); - assert!(total_space > available_space); - assert!(available_space <= free_space); - } - - /// Benchmarks creating and removing a file. This is a baseline benchmark - /// for comparing against the truncate and allocate benchmarks. - #[bench] - fn bench_file_create(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - - b.iter(|| { - fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - fs::remove_file(&path).unwrap(); - }); - } - - /// Benchmarks creating a file, truncating it to 32MiB, and deleting it. - #[bench] - fn bench_file_truncate(b: &mut test::Bencher) { - let size = 32 * 1024 * 1024; - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - - b.iter(|| { - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - file.set_len(size).unwrap(); - fs::remove_file(&path).unwrap(); - }); - } - - /// Benchmarks creating a file, allocating 32MiB for it, and deleting it. - #[bench] - fn bench_file_allocate(b: &mut test::Bencher) { - let size = 32 * 1024 * 1024; - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - - b.iter(|| { - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - file.allocate(size).unwrap(); - fs::remove_file(&path).unwrap(); - }); - } - - /// Benchmarks creating a file, allocating 32MiB for it, and deleting it. - #[bench] - fn bench_allocated_size(b: &mut test::Bencher) { - let size = 32 * 1024 * 1024; - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("file"); - let file = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) - .unwrap(); - file.allocate(size).unwrap(); - - b.iter(|| { - file.allocated_size().unwrap(); - }); - } - - /// Benchmarks duplicating a file descriptor or handle. 
- #[bench] - fn bench_duplicate(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - b.iter(|| test::black_box(file.duplicate().unwrap())); - } - - /// Benchmarks locking and unlocking a file lock. - #[bench] - fn bench_lock_unlock(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - b.iter(|| { - file.lock_exclusive().unwrap(); - file.unlock().unwrap(); - }); - } - - /// Benchmarks the free space method. - #[bench] - fn bench_free_space(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - b.iter(|| { - test::black_box(free_space(&tempdir.path()).unwrap()); - }); - } - - /// Benchmarks the available space method. - #[bench] - fn bench_available_space(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - b.iter(|| { - test::black_box(available_space(&tempdir.path()).unwrap()); - }); - } - - /// Benchmarks the total space method. - #[bench] - fn bench_total_space(b: &mut test::Bencher) { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - b.iter(|| { - test::black_box(total_space(&tempdir.path()).unwrap()); - }); - } -} diff -Nru cargo-0.44.1/vendor/fs2/src/unix.rs cargo-0.47.0/vendor/fs2/src/unix.rs --- cargo-0.44.1/vendor/fs2/src/unix.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/src/unix.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,250 +0,0 @@ -extern crate libc; - -use std::ffi::CString; -use std::fs::File; -use std::io::{Error, ErrorKind, Result}; -use std::mem; -use std::os::unix::ffi::OsStrExt; -use std::os::unix::fs::MetadataExt; -use std::os::unix::io::{AsRawFd, FromRawFd}; -use std::path::Path; - -use FsStats; - -pub fn duplicate(file: &File) -> Result { - unsafe { - let fd = libc::dup(file.as_raw_fd()); - - if fd < 0 { - Err(Error::last_os_error()) - } else { - Ok(File::from_raw_fd(fd)) - } - } -} - -pub fn lock_shared(file: &File) -> Result<()> { - flock(file, libc::LOCK_SH) -} - -pub fn lock_exclusive(file: &File) -> Result<()> { - flock(file, libc::LOCK_EX) -} - -pub fn try_lock_shared(file: &File) -> Result<()> { - flock(file, libc::LOCK_SH | libc::LOCK_NB) -} - -pub fn try_lock_exclusive(file: &File) -> Result<()> { - flock(file, libc::LOCK_EX | libc::LOCK_NB) -} - -pub fn unlock(file: &File) -> Result<()> { - flock(file, libc::LOCK_UN) -} - -pub fn lock_error() -> Error { - Error::from_raw_os_error(libc::EWOULDBLOCK) -} - -#[cfg(not(target_os = "solaris"))] -fn flock(file: &File, flag: libc::c_int) -> Result<()> { - let ret = unsafe { libc::flock(file.as_raw_fd(), flag) }; - if ret < 0 { Err(Error::last_os_error()) } else { Ok(()) } -} - -/// Simulate flock() using fcntl(); primarily for Oracle Solaris. -#[cfg(target_os = "solaris")] -fn flock(file: &File, flag: libc::c_int) -> Result<()> { - let mut fl = libc::flock { - l_whence: 0, - l_start: 0, - l_len: 0, - l_type: 0, - l_pad: [0; 4], - l_pid: 0, - l_sysid: 0, - }; - - // In non-blocking mode, use F_SETLK for cmd, F_SETLKW otherwise, and don't forget to clear - // LOCK_NB. 
- let (cmd, operation) = match flag & libc::LOCK_NB { - 0 => (libc::F_SETLKW, flag), - _ => (libc::F_SETLK, flag & !libc::LOCK_NB), - }; - - match operation { - libc::LOCK_SH => fl.l_type |= libc::F_RDLCK, - libc::LOCK_EX => fl.l_type |= libc::F_WRLCK, - libc::LOCK_UN => fl.l_type |= libc::F_UNLCK, - _ => return Err(Error::from_raw_os_error(libc::EINVAL)), - } - - let ret = unsafe { libc::fcntl(file.as_raw_fd(), cmd, &fl) }; - match ret { - // Translate EACCES to EWOULDBLOCK - -1 => match Error::last_os_error().raw_os_error() { - Some(libc::EACCES) => return Err(lock_error()), - _ => return Err(Error::last_os_error()) - }, - _ => Ok(()) - } -} - -pub fn allocated_size(file: &File) -> Result { - file.metadata().map(|m| m.blocks() as u64 * 512) -} - -#[cfg(any(target_os = "linux", - target_os = "freebsd", - target_os = "android", - target_os = "nacl"))] -pub fn allocate(file: &File, len: u64) -> Result<()> { - let ret = unsafe { libc::posix_fallocate(file.as_raw_fd(), 0, len as libc::off_t) }; - if ret == 0 { Ok(()) } else { Err(Error::last_os_error()) } -} - -#[cfg(any(target_os = "macos", target_os = "ios"))] -pub fn allocate(file: &File, len: u64) -> Result<()> { - let stat = try!(file.metadata()); - - if len > stat.blocks() as u64 * 512 { - let mut fstore = libc::fstore_t { - fst_flags: libc::F_ALLOCATECONTIG, - fst_posmode: libc::F_PEOFPOSMODE, - fst_offset: 0, - fst_length: len as libc::off_t, - fst_bytesalloc: 0, - }; - - let ret = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_PREALLOCATE, &fstore) }; - if ret == -1 { - // Unable to allocate contiguous disk space; attempt to allocate non-contiguously. - fstore.fst_flags = libc::F_ALLOCATEALL; - let ret = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_PREALLOCATE, &fstore) }; - if ret == -1 { - return Err(Error::last_os_error()); - } - } - } - - if len > stat.size() as u64 { - file.set_len(len) - } else { - Ok(()) - } -} - -#[cfg(any(target_os = "openbsd", - target_os = "netbsd", - target_os = "dragonfly", - target_os = "solaris", - target_os = "haiku"))] -pub fn allocate(file: &File, len: u64) -> Result<()> { - // No file allocation API available, just set the length if necessary. - if len > try!(file.metadata()).len() as u64 { - file.set_len(len) - } else { - Ok(()) - } -} - -pub fn statvfs(path: &Path) -> Result { - let cstr = match CString::new(path.as_os_str().as_bytes()) { - Ok(cstr) => cstr, - Err(..) => return Err(Error::new(ErrorKind::InvalidInput, "path contained a null")), - }; - - unsafe { - let mut stat: libc::statvfs = mem::zeroed(); - // danburkert/fs2-rs#1: cast is necessary for platforms where c_char != u8. - if libc::statvfs(cstr.as_ptr() as *const _, &mut stat) != 0 { - Err(Error::last_os_error()) - } else { - Ok(FsStats { - free_space: stat.f_frsize as u64 * stat.f_bfree as u64, - available_space: stat.f_frsize as u64 * stat.f_bavail as u64, - total_space: stat.f_frsize as u64 * stat.f_blocks as u64, - allocation_granularity: stat.f_frsize as u64, - }) - } - } -} - -#[cfg(test)] -mod test { - extern crate tempdir; - extern crate libc; - - use std::fs::{self, File}; - use std::os::unix::io::AsRawFd; - - use {FileExt, lock_contended_error}; - - /// The duplicate method returns a file with a new file descriptor. 
- #[test] - fn duplicate_new_fd() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - assert!(file1.as_raw_fd() != file2.as_raw_fd()); - } - - /// The duplicate method should preservesthe close on exec flag. - #[test] - fn duplicate_cloexec() { - - fn flags(file: &File) -> libc::c_int { - unsafe { libc::fcntl(file.as_raw_fd(), libc::F_GETFL, 0) } - } - - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - - assert_eq!(flags(&file1), flags(&file2)); - } - - /// Tests that locking a file descriptor will replace any existing locks - /// held on the file descriptor. - #[test] - fn lock_replace() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - - // Creating a shared lock will drop an exclusive lock. - file1.lock_exclusive().unwrap(); - file1.lock_shared().unwrap(); - file2.lock_shared().unwrap(); - - // Attempting to replace a shared lock with an exclusive lock will fail - // with multiple lock holders, and remove the original shared lock. - assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - file1.lock_shared().unwrap(); - } - - /// Tests that locks are shared among duplicated file descriptors. - #[test] - fn lock_duplicate() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - let file3 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - - // Create a lock through fd1, then replace it through fd2. - file1.lock_shared().unwrap(); - file2.lock_exclusive().unwrap(); - assert_eq!(file3.try_lock_shared().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Either of the file descriptors should be able to unlock. 
- file1.unlock().unwrap(); - file3.lock_shared().unwrap(); - } -} diff -Nru cargo-0.44.1/vendor/fs2/src/windows.rs cargo-0.47.0/vendor/fs2/src/windows.rs --- cargo-0.44.1/vendor/fs2/src/windows.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/fs2/src/windows.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,279 +0,0 @@ -use std::fs::File; -use std::io::{Error, Result}; -use std::mem; -use std::os::windows::ffi::OsStrExt; -use std::os::windows::io::{AsRawHandle, FromRawHandle}; -use std::path::Path; -use std::ptr; - -use winapi::shared::minwindef::{BOOL, DWORD}; -use winapi::shared::winerror::ERROR_LOCK_VIOLATION; -use winapi::um::fileapi::{FILE_ALLOCATION_INFO, FILE_STANDARD_INFO, GetDiskFreeSpaceW}; -use winapi::um::fileapi::{GetVolumePathNameW, LockFileEx, UnlockFile, SetFileInformationByHandle}; -use winapi::um::handleapi::DuplicateHandle; -use winapi::um::minwinbase::{FileAllocationInfo, FileStandardInfo}; -use winapi::um::minwinbase::{LOCKFILE_FAIL_IMMEDIATELY, LOCKFILE_EXCLUSIVE_LOCK}; -use winapi::um::processthreadsapi::GetCurrentProcess; -use winapi::um::winbase::GetFileInformationByHandleEx; -use winapi::um::winnt::DUPLICATE_SAME_ACCESS; - -use FsStats; - -pub fn duplicate(file: &File) -> Result { - unsafe { - let mut handle = ptr::null_mut(); - let current_process = GetCurrentProcess(); - let ret = DuplicateHandle(current_process, - file.as_raw_handle(), - current_process, - &mut handle, - 0, - true as BOOL, - DUPLICATE_SAME_ACCESS); - if ret == 0 { - Err(Error::last_os_error()) - } else { - Ok(File::from_raw_handle(handle)) - } - } -} - -pub fn allocated_size(file: &File) -> Result { - unsafe { - let mut info: FILE_STANDARD_INFO = mem::zeroed(); - - let ret = GetFileInformationByHandleEx( - file.as_raw_handle(), - FileStandardInfo, - &mut info as *mut _ as *mut _, - mem::size_of::() as DWORD); - - if ret == 0 { - Err(Error::last_os_error()) - } else { - Ok(*info.AllocationSize.QuadPart() as u64) - } - } -} - -pub fn allocate(file: &File, len: u64) -> Result<()> { - if try!(allocated_size(file)) < len { - unsafe { - let mut info: FILE_ALLOCATION_INFO = mem::zeroed(); - *info.AllocationSize.QuadPart_mut() = len as i64; - let ret = SetFileInformationByHandle( - file.as_raw_handle(), - FileAllocationInfo, - &mut info as *mut _ as *mut _, - mem::size_of::() as DWORD); - if ret == 0 { - return Err(Error::last_os_error()); - } - } - } - if try!(file.metadata()).len() < len { - file.set_len(len) - } else { - Ok(()) - } -} - -pub fn lock_shared(file: &File) -> Result<()> { - lock_file(file, 0) -} - -pub fn lock_exclusive(file: &File) -> Result<()> { - lock_file(file, LOCKFILE_EXCLUSIVE_LOCK) -} - -pub fn try_lock_shared(file: &File) -> Result<()> { - lock_file(file, LOCKFILE_FAIL_IMMEDIATELY) -} - -pub fn try_lock_exclusive(file: &File) -> Result<()> { - lock_file(file, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY) -} - -pub fn unlock(file: &File) -> Result<()> { - unsafe { - let ret = UnlockFile(file.as_raw_handle(), 0, 0, !0, !0); - if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) } - } -} - -pub fn lock_error() -> Error { - Error::from_raw_os_error(ERROR_LOCK_VIOLATION as i32) -} - -fn lock_file(file: &File, flags: DWORD) -> Result<()> { - unsafe { - let mut overlapped = mem::zeroed(); - let ret = LockFileEx(file.as_raw_handle(), flags, 0, !0, !0, &mut overlapped); - if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) } - } -} - -fn volume_path(path: &Path, volume_path: &mut [u16]) -> Result<()> { - let path_utf8: Vec = 
path.as_os_str().encode_wide().chain(Some(0)).collect(); - unsafe { - let ret = GetVolumePathNameW(path_utf8.as_ptr(), - volume_path.as_mut_ptr(), - volume_path.len() as DWORD); - if ret == 0 { Err(Error::last_os_error()) } else { Ok(()) - } - } -} - -pub fn statvfs(path: &Path) -> Result { - let root_path: &mut [u16] = &mut [0; 261]; - try!(volume_path(path, root_path)); - unsafe { - - let mut sectors_per_cluster = 0; - let mut bytes_per_sector = 0; - let mut number_of_free_clusters = 0; - let mut total_number_of_clusters = 0; - let ret = GetDiskFreeSpaceW(root_path.as_ptr(), - &mut sectors_per_cluster, - &mut bytes_per_sector, - &mut number_of_free_clusters, - &mut total_number_of_clusters); - if ret == 0 { - Err(Error::last_os_error()) - } else { - let bytes_per_cluster = sectors_per_cluster as u64 * bytes_per_sector as u64; - let free_space = bytes_per_cluster * number_of_free_clusters as u64; - let total_space = bytes_per_cluster * total_number_of_clusters as u64; - Ok(FsStats { - free_space: free_space, - available_space: free_space, - total_space: total_space, - allocation_granularity: bytes_per_cluster, - }) - } - } -} - -#[cfg(test)] -mod test { - - extern crate tempdir; - - use std::fs; - use std::os::windows::io::AsRawHandle; - - use {FileExt, lock_contended_error}; - - /// The duplicate method returns a file with a new file handle. - #[test] - fn duplicate_new_handle() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - assert!(file1.as_raw_handle() != file2.as_raw_handle()); - } - - /// A duplicated file handle does not have access to the original handle's locks. - #[test] - fn lock_duplicate_handle_independence() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - - // Locking the original file handle will block the duplicate file handle from opening a lock. - file1.lock_shared().unwrap(); - assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Once the original file handle is unlocked, the duplicate handle can proceed with a lock. - file1.unlock().unwrap(); - file2.lock_exclusive().unwrap(); - } - - /// A file handle may not be exclusively locked multiple times, or exclusively locked and then - /// shared locked. - #[test] - fn lock_non_reentrant() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Multiple exclusive locks fails. - file.lock_exclusive().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - file.unlock().unwrap(); - - // Shared then Exclusive locks fails. - file.lock_shared().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - } - - /// A file handle can hold an exclusive lock and any number of shared locks, all of which must - /// be unlocked independently. 
- #[test] - fn lock_layering() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Open two shared locks on the file, and then try and fail to open an exclusive lock. - file.lock_exclusive().unwrap(); - file.lock_shared().unwrap(); - file.lock_shared().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Pop one of the shared locks and try again. - file.unlock().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Pop the second shared lock and try again. - file.unlock().unwrap(); - assert_eq!(file.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - // Pop the exclusive lock and finally succeed. - file.unlock().unwrap(); - file.lock_exclusive().unwrap(); - } - - /// A file handle with multiple open locks will have all locks closed on drop. - #[test] - fn lock_layering_cleanup() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - - // Open two shared locks on the file, and then try and fail to open an exclusive lock. - file1.lock_shared().unwrap(); - assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - - drop(file1); - file2.lock_exclusive().unwrap(); - } - - /// A file handle's locks will not be released until the original handle and all of its - /// duplicates have been closed. This on really smells like a bug in Windows. - #[test] - fn lock_duplicate_cleanup() { - let tempdir = tempdir::TempDir::new("fs2").unwrap(); - let path = tempdir.path().join("fs2"); - let file1 = fs::OpenOptions::new().read(true).write(true).create(true).open(&path).unwrap(); - let file2 = file1.duplicate().unwrap(); - - // Open a lock on the original handle, then close it. - file1.lock_shared().unwrap(); - drop(file1); - - // Attempting to create a lock on the file with the duplicate handle will fail. 
- assert_eq!(file2.try_lock_exclusive().unwrap_err().raw_os_error(), - lock_contended_error().raw_os_error()); - } -} diff -Nru cargo-0.44.1/vendor/getrandom/.cargo-checksum.json cargo-0.47.0/vendor/getrandom/.cargo-checksum.json --- cargo-0.44.1/vendor/getrandom/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/getrandom/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"} \ No newline at end of file +{"files":{},"package":"fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/getrandom/Cargo.toml cargo-0.47.0/vendor/getrandom/Cargo.toml --- cargo-0.44.1/vendor/getrandom/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/getrandom/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "getrandom" -version = "0.1.14" +version = "0.1.15" authors = ["The Rand Project Developers"] exclude = ["utils/*", ".*", "appveyor.yml"] description = "A small cross-platform library for retrieving random data from system source" diff -Nru cargo-0.44.1/vendor/getrandom/CHANGELOG.md cargo-0.47.0/vendor/getrandom/CHANGELOG.md --- cargo-0.44.1/vendor/getrandom/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/getrandom/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -4,6 +4,14 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.1.15] - 2020-09-10 +### Changed +- Added support for Internet Explorer 11 [#139] +- Fix Webpack require warning with `wasm-bindgen` [#137] + +[#137]: https://github.com/rust-random/getrandom/pull/137 +[#139]: https://github.com/rust-random/getrandom/pull/139 + ## [0.1.14] - 2020-01-07 ### Changed - Remove use of spin-locks in the `use_file` module. [#125] diff -Nru cargo-0.44.1/vendor/getrandom/.pc/drop-wasi.patch/Cargo.toml cargo-0.47.0/vendor/getrandom/.pc/drop-wasi.patch/Cargo.toml --- cargo-0.44.1/vendor/getrandom/.pc/drop-wasi.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/getrandom/.pc/drop-wasi.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "getrandom" -version = "0.1.14" +version = "0.1.15" authors = ["The Rand Project Developers"] exclude = ["utils/*", ".*", "appveyor.yml"] description = "A small cross-platform library for retrieving random data from system source" diff -Nru cargo-0.44.1/vendor/getrandom/src/wasm32_bindgen.rs cargo-0.47.0/vendor/getrandom/src/wasm32_bindgen.rs --- cargo-0.44.1/vendor/getrandom/src/wasm32_bindgen.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/getrandom/src/wasm32_bindgen.rs 2020-10-01 21:38:28.000000000 +0000 @@ -60,17 +60,17 @@ fn getrandom_init() -> Result { if let Ok(self_) = Global::get_self() { // If `self` is defined then we're in a browser somehow (main window - // or web worker). Here we want to try to use - // `crypto.getRandomValues`, but if `crypto` isn't defined we assume - // we're in an older web browser and the OS RNG isn't available. - - let crypto = self_.crypto(); - if crypto.is_undefined() { - return Err(BINDGEN_CRYPTO_UNDEF); - } + // or web worker). We get `self.crypto` (called `msCrypto` on IE), so we + // can call `crypto.getRandomValues`. 
If `crypto` isn't defined, we + // assume we're in an older web browser and the OS RNG isn't available. + + let crypto: BrowserCrypto = match (self_.crypto(), self_.ms_crypto()) { + (crypto, _) if !crypto.is_undefined() => crypto.into(), + (_, crypto) if !crypto.is_undefined() => crypto.into(), + _ => return Err(BINDGEN_CRYPTO_UNDEF), + }; // Test if `crypto.getRandomValues` is undefined as well - let crypto: BrowserCrypto = crypto.into(); if crypto.get_random_values_fn().is_undefined() { return Err(BINDGEN_GRV_UNDEF); } @@ -78,7 +78,7 @@ return Ok(RngSource::Browser(crypto)); } - return Ok(RngSource::Node(node_require("crypto"))); + return Ok(RngSource::Node(MODULE.require("crypto"))); } #[wasm_bindgen] @@ -88,6 +88,8 @@ fn get_self() -> Result; type Self_; + #[wasm_bindgen(method, getter, js_name = "msCrypto", structural)] + fn ms_crypto(me: &Self_) -> JsValue; #[wasm_bindgen(method, getter, structural)] fn crypto(me: &Self_) -> JsValue; @@ -102,12 +104,17 @@ #[wasm_bindgen(method, js_name = getRandomValues, structural)] fn get_random_values(me: &BrowserCrypto, buf: &mut [u8]); - #[wasm_bindgen(js_name = require)] - fn node_require(s: &str) -> NodeCrypto; - #[derive(Clone, Debug)] type NodeCrypto; #[wasm_bindgen(method, js_name = randomFillSync, structural)] fn random_fill_sync(me: &NodeCrypto, buf: &mut [u8]); + + type NodeModule; + + #[wasm_bindgen(js_name = module)] + static MODULE: NodeModule; + + #[wasm_bindgen(method)] + fn require(this: &NodeModule, s: &str) -> NodeCrypto; } diff -Nru cargo-0.44.1/vendor/getrandom/tests/common.rs cargo-0.47.0/vendor/getrandom/tests/common.rs --- cargo-0.44.1/vendor/getrandom/tests/common.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/getrandom/tests/common.rs 2020-10-01 21:38:28.000000000 +0000 @@ -38,7 +38,7 @@ getrandom(&mut huge).unwrap(); } -#[cfg(any(unix, windows, target_os = "redox", target_os = "fuchsia"))] +#[cfg(not(target_arch = "wasm32"))] #[test] fn test_multithreading() { use std::sync::mpsc::channel; diff -Nru cargo-0.44.1/vendor/git2/.cargo-checksum.json cargo-0.47.0/vendor/git2/.cargo-checksum.json --- cargo-0.44.1/vendor/git2/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"11e4b2082980e751c4bf4273e9cbb4a02c655729c8ee8a79f66cad03c8f4d31e"} \ No newline at end of file +{"files":{},"package":"1e094214efbc7fdbbdee952147e493b00e99a4e52817492277e98967ae918165"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/git2/Cargo.lock cargo-0.47.0/vendor/git2/Cargo.lock --- cargo-0.44.1/vendor/git2/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -4,538 +4,539 @@ name = "ansi_term" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + "libc", + "winapi", ] 
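The wasm32_bindgen hunk above only changes how getrandom probes for an entropy source (self.crypto, then self.msCrypto for IE 11, then Node's module.require("crypto")); the public API is untouched. For reference, a minimal caller, assuming getrandom 0.1.x, looks the same on every target:

    use getrandom::getrandom;

    fn main() {
        // Fill a buffer from the platform source; on wasm32 with wasm-bindgen
        // this goes through crypto.getRandomValues (or msCrypto on IE 11),
        // and through require("crypto").randomFillSync under Node.
        let mut key = [0u8; 32];
        getrandom(&mut key).expect("no system randomness available");
        println!("{:02x?}", &key[..8]);
    }
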
[[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "cc" -version = "1.0.50" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66120af515773fb005778dc07c261bd201ec8ce50bd6e7144c927753fe013381" dependencies = [ - "jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver", ] [[package]] name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "clap" -version = "2.33.0" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ - "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "cmake" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56268c17a6248366d66d4a47a3381369d068cce8409bb1716ed77ea32163bb" +dependencies = [ + "cc", ] [[package]] name = "getrandom" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "git2" -version = "0.13.6" +version = "0.13.11" dependencies = [ - "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.12.6+1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", - "structopt 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "structopt", + 
"tempfile", + "thread-id", + "time", + "url", ] [[package]] name = "heck" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" dependencies = [ - "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.10" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "idna" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "jobserver" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.67" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "755456fae044e6fa1ebbbd1b3e902ae19e73097ed4ed87bb79934a867c007bc3" [[package]] name = "libgit2-sys" -version = "0.12.6+1.0.0" +version = "0.12.13+1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069eea34f76ec15f2822ccf78fe0cdb8c9016764d0a12865278585a74dbdeae5" dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "libssh2-sys 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", ] [[package]] name = "libssh2-sys" -version = "0.2.16" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca46220853ba1c512fc82826d0834d87b06bcd3c2a42241b7de72f3d2fe17056" dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", ] [[package]] name = "libz-sys" -version = "1.0.25" +version = "1.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "cmake", + "libc", + "pkg-config", + "vcpkg", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", ] [[package]] name = "matches" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-src" -version = "111.6.1+1.1.1d" +version = "111.10.2+1.1.1g" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287fdb22e32b5b60624d4a5a7a02dbe82777f730ec0dbc42a0554326fef5a70" dependencies = [ - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", ] [[package]] name = "openssl-sys" -version = "0.9.54" +version = "0.9.58" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" dependencies = [ - "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-src 111.6.1+1.1.1d (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", ] [[package]] name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pkg-config" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "ppv-lite86" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" [[package]] name = "proc-macro-error" -version = "0.4.12" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ - "proc-macro-error-attr 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.9.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", ] [[package]] name = "proc-macro-error-attr" -version = "0.4.12" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", - "syn-mid 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "version_check", ] [[package]] name = "proc-macro2" -version = "1.0.9" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175c513d55719db99da20232b06cda8bab6b83ec2d04e3283edf0213c37c1a29" dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid", ] [[package]] name = "quote" -version = "1.0.3" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", ] [[package]] name = "rand" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] name = "rand_chacha" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", ] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core", ] [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] -name = "smallvec" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] name = "strsim" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.12" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cc388d94ffabf39b5ed5fadddc40147cb21e605f53db6f8f36a625d27489ac5" dependencies = [ - "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "structopt-derive 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "clap", + "lazy_static", + "structopt-derive", ] [[package]] name = "structopt-derive" -version = "0.4.5" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e2513111825077552a6751dfad9e11ce0fba07d7276a3943a037d7e93e64c5f" dependencies = [ - "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-error 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "syn" -version = "1.0.16" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "963f7d3cc59b59b9325165add223142bbf1df27655d07789f109896d353d8350" dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syn-mid" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] name = "tempfile" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", ] [[package]] name = "textwrap" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ - "unicode-width 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width", ] [[package]] name = "thread-id" version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "redox_syscall", + "winapi", ] [[package]] name = "time" -version = "0.1.42" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ - "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", ] [[package]] +name = "tinyvec" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" + +[[package]] name = "unicode-bidi" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", ] [[package]] name = "unicode-normalization" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" dependencies = [ - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tinyvec", ] [[package]] name = "unicode-segmentation" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" [[package]] name = "unicode-width" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "url" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" dependencies = [ - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "idna", + "matches", + "percent-encoding", ] [[package]] name = "vcpkg" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" [[package]] name = "vec_map" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.1" +version = "0.9.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" -"checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" -"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" -"checksum libgit2-sys 
0.12.6+1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bf81b43f9b45ab07897a780c9b7b26b1504497e469c7a78162fc29e3b8b1c1b3" -"checksum libssh2-sys 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)" = "7bb70f29dc7c31d32c97577f13f41221af981b31248083e347b7f2c39225a6bc" -"checksum libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -"checksum openssl-src 111.6.1+1.1.1d (registry+https://github.com/rust-lang/crates.io-index)" = "c91b04cb43c1a8a90e934e0cd612e2a5715d976d2d6cff4490278a0cddf35005" -"checksum openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)" = "1024c0a59774200a555087a6da3f253a9095a5f344e353b212ac4c8b8e450986" -"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro-error 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7" -"checksum proc-macro-error-attr 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de" -"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" -"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" -"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" -"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" 
-"checksum structopt 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)" = "c8faa2719539bbe9d77869bfb15d4ee769f99525e707931452c97b693b3f159d" -"checksum structopt-derive 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3f88b8e18c69496aad6f9ddf4630dd7d585bcaf765786cb415b9aec2fe5a0430" -"checksum syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" -"checksum syn-mid 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" -"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" -"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" -"checksum vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" -"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" -"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff -Nru cargo-0.44.1/vendor/git2/Cargo.toml cargo-0.47.0/vendor/git2/Cargo.toml --- cargo-0.44.1/vendor/git2/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ 
cargo-0.47.0/vendor/git2/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "git2" -version = "0.13.6" +version = "0.13.11" authors = ["Josh Triplett ", "Alex Crichton "] description = "Bindings to libgit2 for interoperating with git repositories. This library is\nboth threadsafe and memory safe and allows both reading and writing git\nrepositories.\n" documentation = "https://docs.rs/git2" @@ -29,7 +29,7 @@ version = "0.2" [dependencies.libgit2-sys] -version = "0.12.6" +version = "0.12.13" [dependencies.log] version = "0.4.8" @@ -54,6 +54,7 @@ ssh = ["libgit2-sys/ssh"] ssh_key_from_memory = ["libgit2-sys/ssh_key_from_memory"] unstable = [] +zlib-ng-compat = ["libgit2-sys/zlib-ng-compat"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] version = "0.1" optional = true diff -Nru cargo-0.44.1/vendor/git2/debian/patches/disable-vendor.patch cargo-0.47.0/vendor/git2/debian/patches/disable-vendor.patch --- cargo-0.44.1/vendor/git2/debian/patches/disable-vendor.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/debian/patches/disable-vendor.patch 2020-10-01 21:38:28.000000000 +0000 @@ -7,6 +7,6 @@ ssh_key_from_memory = ["libgit2-sys/ssh_key_from_memory"] unstable = [] -vendored-openssl = ["openssl-sys/vendored"] + zlib-ng-compat = ["libgit2-sys/zlib-ng-compat"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] version = "0.1" - optional = true diff -Nru cargo-0.44.1/vendor/git2/.pc/disable-vendor.patch/Cargo.toml cargo-0.47.0/vendor/git2/.pc/disable-vendor.patch/Cargo.toml --- cargo-0.44.1/vendor/git2/.pc/disable-vendor.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/.pc/disable-vendor.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "git2" -version = "0.13.6" +version = "0.13.11" authors = ["Josh Triplett ", "Alex Crichton "] description = "Bindings to libgit2 for interoperating with git repositories. This library is\nboth threadsafe and memory safe and allows both reading and writing git\nrepositories.\n" documentation = "https://docs.rs/git2" @@ -29,7 +29,7 @@ version = "0.2" [dependencies.libgit2-sys] -version = "0.12.6" +version = "0.12.13" [dependencies.log] version = "0.4.8" @@ -55,6 +55,7 @@ ssh_key_from_memory = ["libgit2-sys/ssh_key_from_memory"] unstable = [] vendored-openssl = ["openssl-sys/vendored"] +zlib-ng-compat = ["libgit2-sys/zlib-ng-compat"] [target."cfg(all(unix, not(target_os = \"macos\")))".dependencies.openssl-probe] version = "0.1" optional = true diff -Nru cargo-0.44.1/vendor/git2/src/config.rs cargo-0.47.0/vendor/git2/src/config.rs --- cargo-0.44.1/vendor/git2/src/config.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/config.rs 2020-10-01 21:38:28.000000000 +0000 @@ -289,6 +289,22 @@ } } + /// Iterate over the values of a multivar + /// + /// If `regexp` is `Some`, then the iterator will only iterate over all + /// values which match the pattern. 
+ pub fn multivar(&self, name: &str, regexp: Option<&str>) -> Result, Error> { + let mut ret = ptr::null_mut(); + let name = CString::new(name)?; + let regexp = regexp.map(CString::new).transpose()?; + unsafe { + try_call!(raw::git_config_multivar_iterator_new( + &mut ret, &*self.raw, name, regexp + )); + Ok(Binding::from_raw(ret)) + } + } + /// Open the global/XDG configuration file according to git's rules /// /// Git allows you to store your global configuration at `$HOME/.config` or @@ -602,19 +618,43 @@ let mut cfg = Config::open(&path).unwrap(); cfg.set_multivar("foo.bar", "^$", "baz").unwrap(); cfg.set_multivar("foo.bar", "^$", "qux").unwrap(); + cfg.set_multivar("foo.bar", "^$", "quux").unwrap(); + cfg.set_multivar("foo.baz", "^$", "oki").unwrap(); + + // `entries` filters by name + let mut entries: Vec = cfg + .entries(Some("foo.bar")) + .unwrap() + .into_iter() + .map(|entry| entry.unwrap().value().unwrap().into()) + .collect(); + entries.sort(); + assert_eq!(entries, ["baz", "quux", "qux"]); + + // which is the same as `multivar` without a regex + let mut multivals: Vec = cfg + .multivar("foo.bar", None) + .unwrap() + .into_iter() + .map(|entry| entry.unwrap().value().unwrap().into()) + .collect(); + multivals.sort(); + assert_eq!(multivals, entries); - let mut values: Vec = cfg - .entries(None) + // yet _with_ a regex, `multivar` filters by value + let mut quxish: Vec = cfg + .multivar("foo.bar", Some("qu.*x")) .unwrap() .into_iter() .map(|entry| entry.unwrap().value().unwrap().into()) .collect(); - values.sort(); - assert_eq!(values, ["baz", "qux"]); + quxish.sort(); + assert_eq!(quxish, ["quux", "qux"]); cfg.remove_multivar("foo.bar", ".*").unwrap(); - assert_eq!(cfg.entries(None).unwrap().count(), 0); + assert_eq!(cfg.entries(Some("foo.bar")).unwrap().count(), 0); + assert_eq!(cfg.multivar("foo.bar", None).unwrap().count(), 0); } #[test] diff -Nru cargo-0.44.1/vendor/git2/src/diff.rs cargo-0.47.0/vendor/git2/src/diff.rs --- cargo-0.44.1/vendor/git2/src/diff.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/diff.rs 2020-10-01 21:38:28.000000000 +0000 @@ -266,7 +266,8 @@ message = &message[summary.len()..]; raw_opts.patch_no = patch_no; raw_opts.total_patches = total_patches; - raw_opts.id = commit.id().raw(); + let id = commit.id(); + raw_opts.id = id.raw(); raw_opts.summary = summary.as_ptr() as *const _; raw_opts.body = message.as_ptr() as *const _; raw_opts.author = commit.author().raw(); diff -Nru cargo-0.44.1/vendor/git2/src/error.rs cargo-0.47.0/vendor/git2/src/error.rs --- cargo-0.44.1/vendor/git2/src/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -16,6 +16,18 @@ } impl Error { + /// Creates a new error. + /// + /// This is mainly intended for implementors of custom transports or + /// database backends, where it is desirable to propagate an [`Error`] + /// through `libgit2`. + pub fn new>(code: ErrorCode, class: ErrorClass, message: S) -> Self { + let mut err = Error::from_str(message.as_ref()); + err.set_code(code); + err.set_class(class); + err + } + /// Returns the last error that happened with the code specified by `code`. 
/// /// The `code` argument typically comes from the return value of a function @@ -111,10 +123,49 @@ raw::GIT_EINVALID => super::ErrorCode::Invalid, raw::GIT_EUNCOMMITTED => super::ErrorCode::Uncommitted, raw::GIT_EDIRECTORY => super::ErrorCode::Directory, + raw::GIT_EMERGECONFLICT => super::ErrorCode::MergeConflict, + raw::GIT_EMISMATCH => super::ErrorCode::HashsumMismatch, + raw::GIT_EINDEXDIRTY => super::ErrorCode::IndexDirty, + raw::GIT_EAPPLYFAIL => super::ErrorCode::ApplyFail, _ => super::ErrorCode::GenericError, } } + /// Modify the error code associated with this error. + /// + /// This is mainly intended to be used by implementors of custom transports + /// or database backends, and should be used with care. + pub fn set_code(&mut self, code: ErrorCode) { + self.code = match code { + ErrorCode::GenericError => raw::GIT_ERROR, + ErrorCode::NotFound => raw::GIT_ENOTFOUND, + ErrorCode::Exists => raw::GIT_EEXISTS, + ErrorCode::Ambiguous => raw::GIT_EAMBIGUOUS, + ErrorCode::BufSize => raw::GIT_EBUFS, + ErrorCode::User => raw::GIT_EUSER, + ErrorCode::BareRepo => raw::GIT_EBAREREPO, + ErrorCode::UnbornBranch => raw::GIT_EUNBORNBRANCH, + ErrorCode::Unmerged => raw::GIT_EUNMERGED, + ErrorCode::NotFastForward => raw::GIT_ENONFASTFORWARD, + ErrorCode::InvalidSpec => raw::GIT_EINVALIDSPEC, + ErrorCode::Conflict => raw::GIT_ECONFLICT, + ErrorCode::Locked => raw::GIT_ELOCKED, + ErrorCode::Modified => raw::GIT_EMODIFIED, + ErrorCode::Auth => raw::GIT_EAUTH, + ErrorCode::Certificate => raw::GIT_ECERTIFICATE, + ErrorCode::Applied => raw::GIT_EAPPLIED, + ErrorCode::Peel => raw::GIT_EPEEL, + ErrorCode::Eof => raw::GIT_EEOF, + ErrorCode::Invalid => raw::GIT_EINVALID, + ErrorCode::Uncommitted => raw::GIT_EUNCOMMITTED, + ErrorCode::Directory => raw::GIT_EDIRECTORY, + ErrorCode::MergeConflict => raw::GIT_EMERGECONFLICT, + ErrorCode::HashsumMismatch => raw::GIT_EMISMATCH, + ErrorCode::IndexDirty => raw::GIT_EINDEXDIRTY, + ErrorCode::ApplyFail => raw::GIT_EAPPLYFAIL, + }; + } + /// Return the error class associated with this error. /// /// Error classes are in general mostly just informative. For example the @@ -161,6 +212,50 @@ } } + /// Modify the error class associated with this error. + /// + /// This is mainly intended to be used by implementors of custom transports + /// or database backends, and should be used with care. 
+ pub fn set_class(&mut self, class: ErrorClass) { + self.klass = match class { + ErrorClass::None => raw::GIT_ERROR_NONE, + ErrorClass::NoMemory => raw::GIT_ERROR_NOMEMORY, + ErrorClass::Os => raw::GIT_ERROR_OS, + ErrorClass::Invalid => raw::GIT_ERROR_INVALID, + ErrorClass::Reference => raw::GIT_ERROR_REFERENCE, + ErrorClass::Zlib => raw::GIT_ERROR_ZLIB, + ErrorClass::Repository => raw::GIT_ERROR_REPOSITORY, + ErrorClass::Config => raw::GIT_ERROR_CONFIG, + ErrorClass::Regex => raw::GIT_ERROR_REGEX, + ErrorClass::Odb => raw::GIT_ERROR_ODB, + ErrorClass::Index => raw::GIT_ERROR_INDEX, + ErrorClass::Object => raw::GIT_ERROR_OBJECT, + ErrorClass::Net => raw::GIT_ERROR_NET, + ErrorClass::Tag => raw::GIT_ERROR_TAG, + ErrorClass::Tree => raw::GIT_ERROR_TREE, + ErrorClass::Indexer => raw::GIT_ERROR_INDEXER, + ErrorClass::Ssl => raw::GIT_ERROR_SSL, + ErrorClass::Submodule => raw::GIT_ERROR_SUBMODULE, + ErrorClass::Thread => raw::GIT_ERROR_THREAD, + ErrorClass::Stash => raw::GIT_ERROR_STASH, + ErrorClass::Checkout => raw::GIT_ERROR_CHECKOUT, + ErrorClass::FetchHead => raw::GIT_ERROR_FETCHHEAD, + ErrorClass::Merge => raw::GIT_ERROR_MERGE, + ErrorClass::Ssh => raw::GIT_ERROR_SSH, + ErrorClass::Filter => raw::GIT_ERROR_FILTER, + ErrorClass::Revert => raw::GIT_ERROR_REVERT, + ErrorClass::Callback => raw::GIT_ERROR_CALLBACK, + ErrorClass::CherryPick => raw::GIT_ERROR_CHERRYPICK, + ErrorClass::Describe => raw::GIT_ERROR_DESCRIBE, + ErrorClass::Rebase => raw::GIT_ERROR_REBASE, + ErrorClass::Filesystem => raw::GIT_ERROR_FILESYSTEM, + ErrorClass::Patch => raw::GIT_ERROR_PATCH, + ErrorClass::Worktree => raw::GIT_ERROR_WORKTREE, + ErrorClass::Sha1 => raw::GIT_ERROR_SHA1, + ErrorClass::Http => raw::GIT_ERROR_HTTP, + } as c_int; + } + /// Return the raw error code associated with this error. pub fn raw_code(&self) -> raw::git_error_code { macro_rules! check( ($($e:ident,)*) => ( diff -Nru cargo-0.44.1/vendor/git2/src/index.rs cargo-0.47.0/vendor/git2/src/index.rs --- cargo-0.44.1/vendor/git2/src/index.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/index.rs 2020-10-01 21:38:28.000000000 +0000 @@ -66,6 +66,21 @@ pub id: Oid, pub flags: u16, pub flags_extended: u16, + + /// The path of this index entry as a byte vector. Regardless of the + /// current platform, the directory separator is an ASCII forward slash + /// (`0x2F`). There are no terminating or internal NUL characters, and no + /// trailing slashes. Most of the time, paths will be valid utf-8 — but + /// not always. For more information on the path storage format, see + /// [these git docs][git-index-docs]. Note that libgit2 will take care of + /// handling the prefix compression mentioned there. + /// + /// [git-index-docs]: https://github.com/git/git/blob/a08a83db2bf27f015bec9a435f6d73e223c21c5e/Documentation/technical/index-format.txt#L107-L124 + /// + /// You can turn this value into a `std::ffi::CString` with + /// `CString::new(&entry.path[..]).unwrap()`. To turn a reference into a + /// `&std::path::Path`, see the `bytes2path()` function in the private, + /// internal `util` module in this crate’s source code. pub path: Vec, } @@ -102,6 +117,27 @@ } } + /// Get index on-disk version. + /// + /// Valid return values are 2, 3, or 4. If 3 is returned, an index + /// with version 2 may be written instead, if the extension data in + /// version 3 is not necessary. + pub fn version(&self) -> u32 { + unsafe { raw::git_index_version(self.raw) } + } + + /// Set index on-disk version. + /// + /// Valid values are 2, 3, or 4. 
If 2 is given, git_index_write may + /// write an index with version 3 instead, if necessary to accurately + /// represent the index. + pub fn set_version(&mut self, version: u32) -> Result<(), Error> { + unsafe { + try_call!(raw::git_index_set_version(self.raw, version)); + } + Ok(()) + } + /// Add or update an index entry from an in-memory struct /// /// If a previous index entry exists that has the same path and stage as the diff -Nru cargo-0.44.1/vendor/git2/src/lib.rs cargo-0.47.0/vendor/git2/src/lib.rs --- cargo-0.44.1/vendor/git2/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -97,6 +97,7 @@ Index, IndexConflict, IndexConflicts, IndexEntries, IndexEntry, IndexMatchedPath, }; pub use crate::indexer::{IndexerProgress, Progress}; +pub use crate::mempack::Mempack; pub use crate::merge::{AnnotatedCommit, MergeOptions}; pub use crate::message::{message_prettify, DEFAULT_COMMENT_CHAR}; pub use crate::note::{Note, Notes}; @@ -118,6 +119,7 @@ pub use crate::remote_callbacks::{Credentials, RemoteCallbacks}; pub use crate::remote_callbacks::{TransportMessage, UpdateTips}; pub use crate::repo::{Repository, RepositoryInitOptions}; +pub use crate::revert::RevertOptions; pub use crate::revspec::Revspec; pub use crate::revwalk::Revwalk; pub use crate::signature::Signature; @@ -142,6 +144,12 @@ /// An enumeration of possible errors that can happen when working with a git /// repository. +// Note: We omit a few native error codes, as they are unlikely to be propagated +// to the library user. Currently: +// +// * GIT_EPASSTHROUGH +// * GIT_ITEROVER +// * GIT_RETRY #[derive(PartialEq, Eq, Clone, Debug, Copy)] pub enum ErrorCode { /// Generic error @@ -186,8 +194,16 @@ Invalid, /// Uncommitted changes in index prevented operation Uncommitted, - /// Operation was not valid for a directory, + /// Operation was not valid for a directory Directory, + /// A merge conflict exists and cannot continue + MergeConflict, + /// Hashsum mismatch in object + HashsumMismatch, + /// Unsaved changes in the index would be overwritten + IndexDirty, + /// Patch application failed + ApplyFail, } /// An enumeration of possible categories of things that can have @@ -622,6 +638,7 @@ pub mod build; pub mod cert; pub mod oid_array; +pub mod opts; pub mod string_array; pub mod transport; @@ -639,6 +656,7 @@ mod error; mod index; mod indexer; +mod mempack; mod merge; mod message; mod note; @@ -656,6 +674,7 @@ mod remote; mod remote_callbacks; mod repo; +mod revert; mod revspec; mod revwalk; mod signature; @@ -663,6 +682,7 @@ mod status; mod submodule; mod tag; +mod tagforeach; mod time; mod tree; mod treebuilder; diff -Nru cargo-0.44.1/vendor/git2/src/mempack.rs cargo-0.47.0/vendor/git2/src/mempack.rs --- cargo-0.44.1/vendor/git2/src/mempack.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/mempack.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,49 @@ +use std::marker; + +use crate::util::Binding; +use crate::{raw, Buf, Error, Odb, Repository}; + +/// A structure to represent a mempack backend for the object database. The +/// Mempack is bound to the Odb that it was created from, and cannot outlive +/// that Odb. 
+pub struct Mempack<'odb> { + raw: *mut raw::git_odb_backend, + _marker: marker::PhantomData<&'odb Odb<'odb>>, +} + +impl<'odb> Binding for Mempack<'odb> { + type Raw = *mut raw::git_odb_backend; + + unsafe fn from_raw(raw: *mut raw::git_odb_backend) -> Mempack<'odb> { + Mempack { + raw: raw, + _marker: marker::PhantomData, + } + } + + fn raw(&self) -> *mut raw::git_odb_backend { + self.raw + } +} + +// We don't need to implement `Drop` for Mempack because it is owned by the +// odb to which it is attached, and that will take care of freeing the mempack +// and associated memory. + +impl<'odb> Mempack<'odb> { + /// Dumps the contents of the mempack into the provided buffer. + pub fn dump(&self, repo: &Repository, buf: &mut Buf) -> Result<(), Error> { + unsafe { + try_call!(raw::git_mempack_dump(buf.raw(), repo.raw(), self.raw)); + } + Ok(()) + } + + /// Clears all data in the mempack. + pub fn reset(&self) -> Result<(), Error> { + unsafe { + try_call!(raw::git_mempack_reset(self.raw)); + } + Ok(()) + } +} diff -Nru cargo-0.44.1/vendor/git2/src/odb.rs cargo-0.47.0/vendor/git2/src/odb.rs --- cargo-0.44.1/vendor/git2/src/odb.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/odb.rs 2020-10-01 21:38:28.000000000 +0000 @@ -10,7 +10,7 @@ use crate::panic; use crate::util::Binding; -use crate::{raw, Error, IndexerProgress, Object, ObjectType, Oid, Progress}; +use crate::{raw, Error, IndexerProgress, Mempack, Object, ObjectType, Oid, Progress}; /// A structure to represent a git object database pub struct Odb<'repo> { @@ -218,6 +218,47 @@ Ok(()) } } + + /// Create a new mempack backend, and add it to this odb with the given + /// priority. Higher values give the backend higher precedence. The default + /// loose and pack backends have priorities 1 and 2 respectively (hard-coded + /// in libgit2). A reference to the new mempack backend is returned on + /// success. The lifetime of the backend must be contained within the + /// lifetime of this odb, since deletion of the odb will also result in + /// deletion of the mempack backend. + /// + /// Here is an example that fails to compile because it tries to hold the + /// mempack reference beyond the odb's lifetime: + /// + /// ```compile_fail + /// use git2::Odb; + /// let mempack = { + /// let odb = Odb::new().unwrap(); + /// odb.add_new_mempack_backend(1000).unwrap() + /// }; + /// ``` + pub fn add_new_mempack_backend<'odb>( + &'odb self, + priority: i32, + ) -> Result, Error> { + unsafe { + let mut mempack = ptr::null_mut(); + // The mempack backend object in libgit2 is only ever freed by an + // odb that has the backend in its list. So to avoid potentially + // leaking the mempack backend, this API ensures that the backend + // is added to the odb before returning it. The lifetime of the + // mempack is also bound to the lifetime of the odb, so that users + // can't end up with a dangling reference to a mempack object that + // was actually freed when the odb was destroyed. + try_call!(raw::git_mempack_new(&mut mempack)); + try_call!(raw::git_odb_add_backend( + self.raw, + mempack, + priority as c_int + )); + Ok(Mempack::from_raw(mempack)) + } + } } /// An object from the Object Database. 
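The add_new_mempack_backend and Mempack documentation above describe the intended flow: stage objects in memory, dump them as packfile data, then reset. A minimal sketch of that flow (a standalone program against the git2 crate; the scratch path "/tmp/mempack-demo" is only an assumption for illustration):

    use git2::{ObjectType, Repository};

    fn main() -> Result<(), git2::Error> {
        let repo = Repository::init("/tmp/mempack-demo")?;
        let odb = repo.odb()?;
        // Priority 1000 outranks the built-in loose (1) and pack (2) backends,
        // so freshly written objects land in memory first.
        let mempack = odb.add_new_mempack_backend(1000)?;

        // This blob goes into the mempack rather than onto disk.
        let oid = odb.write(ObjectType::Blob, b"hello mempack")?;
        assert!(odb.exists(oid));

        // Dump the in-memory objects as packfile data, then clear the backend;
        // after reset() the blob is no longer reachable through this odb.
        let mut buf = git2::Buf::new();
        mempack.dump(&repo, &mut buf)?;
        mempack.reset()?;
        println!("dumped {} bytes for blob {}", buf.len(), oid);
        Ok(())
    }

The write_with_mempack test added in the next hunk exercises the same sequence end to end against a real commit.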
@@ -626,4 +667,45 @@ } assert_eq!(progress_called, true); } + + #[test] + fn write_with_mempack() { + use crate::{Buf, ResetType}; + use std::io::Write; + use std::path::Path; + + // Create a repo, add a mempack backend + let (_td, repo) = crate::test::repo_init(); + let odb = repo.odb().unwrap(); + let mempack = odb.add_new_mempack_backend(1000).unwrap(); + + // Sanity check that foo doesn't exist initially + let foo_file = Path::new(repo.workdir().unwrap()).join("foo"); + assert!(!foo_file.exists()); + + // Make a commit that adds foo. This writes new stuff into the mempack + // backend. + let (oid1, _id) = crate::test::commit(&repo); + let commit1 = repo.find_commit(oid1).unwrap(); + t!(repo.reset(commit1.as_object(), ResetType::Hard, None)); + assert!(foo_file.exists()); + + // Dump the mempack modifications into a buf, and reset it. This "erases" + // commit-related objects from the repository. Ensure the commit appears + // to have become invalid, by checking for failure in `reset --hard`. + let mut buf = Buf::new(); + mempack.dump(&repo, &mut buf).unwrap(); + mempack.reset().unwrap(); + assert!(repo + .reset(commit1.as_object(), ResetType::Hard, None) + .is_err()); + + // Write the buf into a packfile in the repo. This brings back the + // missing objects, and we verify everything is good again. + let mut packwriter = odb.packwriter().unwrap(); + packwriter.write(&buf).unwrap(); + packwriter.commit().unwrap(); + t!(repo.reset(commit1.as_object(), ResetType::Hard, None)); + assert!(foo_file.exists()); + } } diff -Nru cargo-0.44.1/vendor/git2/src/opts.rs cargo-0.47.0/vendor/git2/src/opts.rs --- cargo-0.44.1/vendor/git2/src/opts.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/opts.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,27 @@ +//! Bindings to libgit2's git_libgit2_opts function. + +use crate::raw; + +/// Controls whether or not libgit2 will verify that objects loaded have the +/// expected hash. Enabled by default, but disabling this can significantly +/// improve performance, at the cost of relying on repository integrity +/// without checking it. +pub fn strict_hash_verification(enabled: bool) { + let error = unsafe { + raw::git_libgit2_opts( + raw::GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION as libc::c_int, + if enabled { 1 } else { 0 } as libc::c_int, + ) + }; + // This function cannot actually fail, but the function has an error return + // for other options that can. + debug_assert!(error >= 0); +} + +#[cfg(test)] +mod test { + #[test] + fn smoke() { + super::strict_hash_verification(false); + } +} diff -Nru cargo-0.44.1/vendor/git2/src/packbuilder.rs cargo-0.47.0/vendor/git2/src/packbuilder.rs --- cargo-0.44.1/vendor/git2/src/packbuilder.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/packbuilder.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,6 +6,7 @@ use crate::util::Binding; use crate::{panic, raw, Buf, Error, Oid, Repository, Revwalk}; +#[derive(PartialEq, Eq, Clone, Debug, Copy)] /// Stages that are reported by the `PackBuilder` progress callback. 
pub enum PackBuilderStage { /// Adding objects to the pack diff -Nru cargo-0.44.1/vendor/git2/src/refspec.rs cargo-0.47.0/vendor/git2/src/refspec.rs --- cargo-0.44.1/vendor/git2/src/refspec.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/refspec.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ use std::str; use crate::util::Binding; -use crate::{raw, Direction}; +use crate::{raw, Buf, Direction, Error}; /// A structure to represent a git [refspec][1]. /// @@ -77,6 +77,34 @@ pub fn bytes(&self) -> &[u8] { unsafe { crate::opt_bytes(self, raw::git_refspec_string(self.raw)).unwrap() } } + + /// Transform a reference to its target following the refspec's rules + pub fn transform(&self, name: &str) -> Result { + let name = CString::new(name).unwrap(); + unsafe { + let buf = Buf::new(); + try_call!(raw::git_refspec_transform( + buf.raw(), + self.raw, + name.as_ptr() + )); + Ok(buf) + } + } + + /// Transform a target reference to its source reference following the refspec's rules + pub fn rtransform(&self, name: &str) -> Result { + let name = CString::new(name).unwrap(); + unsafe { + let buf = Buf::new(); + try_call!(raw::git_refspec_rtransform( + buf.raw(), + self.raw, + name.as_ptr() + )); + Ok(buf) + } + } } impl<'remote> Binding for Refspec<'remote> { diff -Nru cargo-0.44.1/vendor/git2/src/remote_callbacks.rs cargo-0.47.0/vendor/git2/src/remote_callbacks.rs --- cargo-0.44.1/vendor/git2/src/remote_callbacks.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/remote_callbacks.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use libc::{c_char, c_int, c_uint, c_void}; +use libc::{c_char, c_int, c_uint, c_void, size_t}; use std::ffi::{CStr, CString}; use std::mem; use std::ptr; @@ -7,7 +7,9 @@ use crate::cert::Cert; use crate::util::Binding; -use crate::{panic, raw, Cred, CredentialType, Error, IndexerProgress, Oid, Progress}; +use crate::{ + panic, raw, Cred, CredentialType, Error, IndexerProgress, Oid, PackBuilderStage, Progress, +}; /// A structure to contain the callbacks which are invoked when a repository is /// being updated or downloaded. @@ -15,7 +17,9 @@ /// These callbacks are used to manage facilities such as authentication, /// transfer progress, etc. pub struct RemoteCallbacks<'a> { + push_progress: Option>>, progress: Option>>, + pack_progress: Option>>, credentials: Option>>, sideband_progress: Option>>, update_tips: Option>>, @@ -56,6 +60,22 @@ /// was rejected by the remote server with a reason why. 
pub type PushUpdateReference<'a> = dyn FnMut(&str, Option<&str>) -> Result<(), Error> + 'a; +/// Callback for push transfer progress +/// +/// Parameters: +/// * current +/// * total +/// * bytes +pub type PushTransferProgress<'a> = dyn FnMut(usize, usize, usize) + 'a; + +/// Callback for pack progress +/// +/// Parameters: +/// * stage +/// * current +/// * total +pub type PackProgress<'a> = dyn FnMut(PackBuilderStage, usize, usize) + 'a; + impl<'a> Default for RemoteCallbacks<'a> { fn default() -> Self { Self::new() @@ -68,10 +88,12 @@ RemoteCallbacks { credentials: None, progress: None, + pack_progress: None, sideband_progress: None, update_tips: None, certificate_check: None, push_update_reference: None, + push_progress: None, } } @@ -158,6 +180,26 @@ self.push_update_reference = Some(Box::new(cb) as Box>); self } + + /// The callback through which progress of push transfer is monitored + pub fn push_transfer_progress(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where + F: FnMut(usize, usize, usize) + 'a, + { + self.push_progress = Some(Box::new(cb) as Box>); + self + } + + /// Function to call with progress information during pack building. + /// Be aware that this is called inline with pack building operations, + /// so performance may be affected. + pub fn pack_progress(&mut self, cb: F) -> &mut RemoteCallbacks<'a> + where + F: FnMut(PackBuilderStage, usize, usize) + 'a, + { + self.pack_progress = Some(Box::new(cb) as Box>); + self + } } impl<'a> Binding for RemoteCallbacks<'a> { @@ -188,6 +230,12 @@ if self.push_update_reference.is_some() { callbacks.push_update_reference = Some(push_update_reference_cb); } + if self.push_progress.is_some() { + callbacks.push_transfer_progress = Some(push_transfer_progress_cb); + } + if self.pack_progress.is_some() { + callbacks.pack_progress = Some(pack_progress_cb); + } if self.update_tips.is_some() { let f: extern "C" fn( *const c_char, @@ -360,3 +408,45 @@ }) .unwrap_or(-1) } + +extern "C" fn push_transfer_progress_cb( + progress: c_uint, + total: c_uint, + bytes: size_t, + data: *mut c_void, +) -> c_int { + panic::wrap(|| unsafe { + let payload = &mut *(data as *mut RemoteCallbacks<'_>); + let callback = match payload.push_progress { + Some(ref mut c) => c, + None => return 0, + }; + + callback(progress as usize, total as usize, bytes as usize); + + 0 + }) + .unwrap_or(-1) +} + +extern "C" fn pack_progress_cb( + stage: raw::git_packbuilder_stage_t, + current: c_uint, + total: c_uint, + data: *mut c_void, +) -> c_int { + panic::wrap(|| unsafe { + let payload = &mut *(data as *mut RemoteCallbacks<'_>); + let callback = match payload.pack_progress { + Some(ref mut c) => c, + None => return 0, + }; + + let stage = Binding::from_raw(stage); + + callback(stage, current as usize, total as usize); + + 0 + }) + .unwrap_or(-1) +} diff -Nru cargo-0.44.1/vendor/git2/src/remote.rs cargo-0.47.0/vendor/git2/src/remote.rs --- cargo-0.44.1/vendor/git2/src/remote.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/remote.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,7 +9,7 @@ use crate::string_array::StringArray; use crate::util::Binding; -use crate::{raw, Direction, Error, FetchPrune, Oid, ProxyOptions, Refspec}; +use crate::{raw, Buf, Direction, Error, FetchPrune, Oid, ProxyOptions, Refspec}; use crate::{AutotagOption, Progress, RemoteCallbacks, Repository}; /// A structure representing a [remote][1] of a git repository. 
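The two new callbacks hook into the existing `RemoteCallbacks` builder: `pack_progress` reports pack-building stages, while `push_transfer_progress` reports objects and bytes written during a push. A rough usage sketch, where the `origin` remote and the `refs/heads/master` refspec are placeholder names:

```rust
use git2::{PackBuilderStage, PushOptions, RemoteCallbacks, Repository};

fn main() -> Result<(), git2::Error> {
    // Placeholder setup: an open repository with a configured "origin" remote.
    let repo = Repository::open(".")?;
    let mut remote = repo.find_remote("origin")?;

    let mut callbacks = RemoteCallbacks::new();
    // Called while the pack is being built; `stage` is a `PackBuilderStage`.
    callbacks.pack_progress(|stage: PackBuilderStage, current, total| {
        println!("packing ({:?}): {}/{}", stage, current, total);
    });
    // Called while the pack is being transferred to the server.
    callbacks.push_transfer_progress(|current, total, bytes| {
        println!("pushing: {}/{} objects, {} bytes", current, total, bytes);
    });

    let mut options = PushOptions::new();
    options.remote_callbacks(callbacks);
    remote.push(&["refs/heads/master"], Some(&mut options))?;
    Ok(())
}
```

Printing from these callbacks is only for illustration; they fire frequently, and the `pack_progress` documentation above warns that heavy work here can slow down pack building.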
@@ -112,6 +112,20 @@ unsafe { crate::opt_bytes(self, raw::git_remote_pushurl(&*self.raw)) } } + /// Get the remote's default branch. + /// + /// The remote (or more exactly its transport) must have connected to the + /// remote repository. This default branch is available as soon as the + /// connection to the remote is initiated and it remains available after + /// disconnecting. + pub fn default_branch(&self) -> Result { + unsafe { + let buf = Buf::new(); + try_call!(raw::git_remote_default_branch(buf.raw(), self.raw)); + Ok(buf) + } + } + /// Open a connection to a remote. pub fn connect(&mut self, dir: Direction) -> Result<(), Error> { // TODO: can callbacks be exposed safely? @@ -576,6 +590,14 @@ pub fn list(&self) -> Result<&[RemoteHead<'_>], Error> { self.remote.list() } + + /// Get the remote's default branch. + /// + /// This default branch is available as soon as the connection to the remote + /// is initiated and it remains available after disconnecting. + pub fn default_branch(&self) -> Result { + self.remote.default_branch() + } } impl<'repo, 'connection, 'cb> Drop for RemoteConnection<'repo, 'connection, 'cb> { diff -Nru cargo-0.44.1/vendor/git2/src/repo.rs cargo-0.47.0/vendor/git2/src/repo.rs --- cargo-0.44.1/vendor/git2/src/repo.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/repo.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,11 +14,13 @@ use crate::oid_array::OidArray; use crate::stash::{stash_cb, StashApplyOptions, StashCbData}; use crate::string_array::StringArray; +use crate::tagforeach::{tag_foreach_cb, TagForeachCB, TagForeachData}; use crate::util::{self, path_to_repo_path, Binding}; use crate::CherrypickOptions; +use crate::RevertOptions; use crate::{ - init, raw, AttrCheckFlags, Buf, Error, Object, Remote, RepositoryOpenFlags, RepositoryState, - Revspec, StashFlags, + raw, AttrCheckFlags, Buf, Error, Object, Remote, RepositoryOpenFlags, RepositoryState, Revspec, + StashFlags, }; use crate::{ AnnotatedCommit, MergeAnalysis, MergeOptions, MergePreference, SubmoduleIgnore, SubmoduleStatus, @@ -63,7 +65,7 @@ /// /// The path can point to either a normal or bare repository. pub fn open>(path: P) -> Result { - init(); + crate::init(); // Normal file path OK (does not need Windows conversion). let path = path.as_ref().into_c_string()?; let mut ret = ptr::null_mut(); @@ -77,7 +79,7 @@ /// /// The path can point to only a bare repository. pub fn open_bare>(path: P) -> Result { - init(); + crate::init(); // Normal file path OK (does not need Windows conversion). let path = path.as_ref().into_c_string()?; let mut ret = ptr::null_mut(); @@ -93,7 +95,7 @@ /// With `$GIT_DIR` unset, this will search for a repository starting in /// the current directory. pub fn open_from_env() -> Result { - init(); + crate::init(); let mut ret = ptr::null_mut(); let flags = raw::GIT_REPOSITORY_OPEN_FROM_ENV; unsafe { @@ -143,7 +145,7 @@ O: AsRef, I: IntoIterator, { - init(); + crate::init(); // Normal file path OK (does not need Windows conversion). let path = path.as_ref().into_c_string()?; let ceiling_dirs_os = env::join_paths(ceiling_dirs)?; @@ -166,7 +168,7 @@ /// until it finds a repository. pub fn discover>(path: P) -> Result { // TODO: this diverges significantly from the libgit2 API - init(); + crate::init(); let buf = Buf::new(); // Normal file path OK (does not need Windows conversion). 
let path = path.as_ref().into_c_string()?; @@ -204,7 +206,7 @@ path: P, opts: &RepositoryInitOptions, ) -> Result { - init(); + crate::init(); // Normal file path OK (does not need Windows conversion). let path = path.as_ref().into_c_string()?; let mut ret = ptr::null_mut(); @@ -236,7 +238,7 @@ /// Attempt to wrap an object database as a repository. pub fn from_odb(odb: Odb<'_>) -> Result { - init(); + crate::init(); let mut ret = ptr::null_mut(); unsafe { try_call!(raw::git_repository_wrap_odb(&mut ret, odb.raw())); @@ -1053,6 +1055,14 @@ } } + /// Override the object database for this repository + pub fn set_odb(&self, odb: &Odb<'_>) -> Result<(), Error> { + unsafe { + try_call!(raw::git_repository_set_odb(self.raw(), odb.raw())); + } + Ok(()) + } + /// Create a new branch pointing at a target commit /// /// A new direct reference will be created pointing to this target commit. @@ -1730,6 +1740,26 @@ } } + /// iterate over all tags calling `cb` on each. + /// the callback is provided the tag id and name + pub fn tag_foreach(&self, cb: T) -> Result<(), Error> + where + T: FnMut(Oid, &[u8]) -> bool, + { + let mut data = TagForeachData { + cb: Box::new(cb) as TagForeachCB<'_>, + }; + + unsafe { + raw::git_tag_foreach( + self.raw, + Some(tag_foreach_cb), + (&mut data) as *mut _ as *mut _, + ); + } + Ok(()) + } + /// Updates files in the index and the working tree to match the content of /// the commit pointed at by HEAD. pub fn checkout_head(&self, opts: Option<&mut CheckoutBuilder<'_>>) -> Result<(), Error> { @@ -2668,6 +2698,16 @@ } } + /// Find the remote name of a remote-tracking branch + pub fn branch_remote_name(&self, refname: &str) -> Result { + let refname = CString::new(refname)?; + unsafe { + let buf = Buf::new(); + try_call!(raw::git_branch_remote_name(buf.raw(), self.raw, refname)); + Ok(buf) + } + } + /// Retrieves the name of the reference supporting the remote tracking branch, /// given the name of a local branch reference. pub fn branch_upstream_name(&self, refname: &str) -> Result { @@ -2711,6 +2751,46 @@ Ok(()) } } + + /// Reverts the given commit, producing changes in the index and working directory. + pub fn revert( + &self, + commit: &Commit<'_>, + options: Option<&mut RevertOptions<'_>>, + ) -> Result<(), Error> { + let raw_opts = options.map(|o| o.raw()); + let ptr_raw_opts = match raw_opts.as_ref() { + Some(v) => v, + None => 0 as *const _, + }; + unsafe { + try_call!(raw::git_revert(self.raw(), commit.raw(), ptr_raw_opts)); + Ok(()) + } + } + + /// Reverts the given commit against the given "our" commit, + /// producing an index that reflects the result of the revert. 
+ pub fn revert_commit( + &self, + revert_commit: &Commit<'_>, + our_commit: &Commit<'_>, + mainline: u32, + options: Option<&MergeOptions>, + ) -> Result { + let mut ret = ptr::null_mut(); + unsafe { + try_call!(raw::git_revert_commit( + &mut ret, + self.raw(), + revert_commit.raw(), + our_commit.raw(), + mainline, + options.map(|o| o.raw()) + )); + Ok(Binding::from_raw(ret)) + } + } } impl Binding for Repository { @@ -3450,4 +3530,35 @@ assert!(!p1.exists()); assert!(p2.exists()); } + + #[test] + fn smoke_revert() { + let (_td, repo) = crate::test::repo_init(); + let foo_file = Path::new(repo.workdir().unwrap()).join("foo"); + assert!(!foo_file.exists()); + + let (oid1, _id) = crate::test::commit(&repo); + let commit1 = repo.find_commit(oid1).unwrap(); + t!(repo.reset(commit1.as_object(), ResetType::Hard, None)); + assert!(foo_file.exists()); + + repo.revert(&commit1, None).unwrap(); + let id = repo.index().unwrap().write_tree().unwrap(); + let tree2 = repo.find_tree(id).unwrap(); + let sig = repo.signature().unwrap(); + repo.commit(Some("HEAD"), &sig, &sig, "commit 1", &tree2, &[&commit1]) + .unwrap(); + // reverting once removes `foo` file + assert!(!foo_file.exists()); + + let oid2 = repo.head().unwrap().target().unwrap(); + let commit2 = repo.find_commit(oid2).unwrap(); + repo.revert(&commit2, None).unwrap(); + let id = repo.index().unwrap().write_tree().unwrap(); + let tree3 = repo.find_tree(id).unwrap(); + repo.commit(Some("HEAD"), &sig, &sig, "commit 2", &tree3, &[&commit2]) + .unwrap(); + // reverting twice restores `foo` file + assert!(foo_file.exists()); + } } diff -Nru cargo-0.44.1/vendor/git2/src/revert.rs cargo-0.47.0/vendor/git2/src/revert.rs --- cargo-0.44.1/vendor/git2/src/revert.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/revert.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,69 @@ +use std::mem; + +use crate::build::CheckoutBuilder; +use crate::merge::MergeOptions; +use crate::raw; +use std::ptr; + +/// Options to specify when reverting +pub struct RevertOptions<'cb> { + mainline: u32, + checkout_builder: Option>, + merge_opts: Option, +} + +impl<'cb> RevertOptions<'cb> { + /// Creates a default set of revert options + pub fn new() -> RevertOptions<'cb> { + RevertOptions { + mainline: 0, + checkout_builder: None, + merge_opts: None, + } + } + + /// Set the mainline value + /// + /// For merge commits, the "mainline" is treated as the parent. 
+ pub fn mainline(&mut self, mainline: u32) -> &mut Self { + self.mainline = mainline; + self + } + + /// Set the checkout builder + pub fn checkout_builder(&mut self, cb: CheckoutBuilder<'cb>) -> &mut Self { + self.checkout_builder = Some(cb); + self + } + + /// Set the merge options + pub fn merge_opts(&mut self, merge_opts: MergeOptions) -> &mut Self { + self.merge_opts = Some(merge_opts); + self + } + + /// Obtain the raw struct + pub fn raw(&mut self) -> raw::git_revert_options { + unsafe { + let mut checkout_opts: raw::git_checkout_options = mem::zeroed(); + raw::git_checkout_init_options(&mut checkout_opts, raw::GIT_CHECKOUT_OPTIONS_VERSION); + if let Some(ref mut cb) = self.checkout_builder { + cb.configure(&mut checkout_opts); + } + + let mut merge_opts: raw::git_merge_options = mem::zeroed(); + raw::git_merge_init_options(&mut merge_opts, raw::GIT_MERGE_OPTIONS_VERSION); + if let Some(ref opts) = self.merge_opts { + ptr::copy(opts.raw(), &mut merge_opts, 1); + } + + let mut revert_opts: raw::git_revert_options = mem::zeroed(); + raw::git_revert_options_init(&mut revert_opts, raw::GIT_REVERT_OPTIONS_VERSION); + revert_opts.mainline = self.mainline; + revert_opts.checkout_opts = checkout_opts; + revert_opts.merge_opts = merge_opts; + + revert_opts + } + } +} diff -Nru cargo-0.44.1/vendor/git2/src/submodule.rs cargo-0.47.0/vendor/git2/src/submodule.rs --- cargo-0.44.1/vendor/git2/src/submodule.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/submodule.rs 2020-10-01 21:38:28.000000000 +0000 @@ -33,6 +33,25 @@ unsafe { crate::opt_bytes(self, raw::git_submodule_branch(self.raw)) } } + /// Perform the clone step for a newly created submodule. + /// + /// This performs the necessary `git_clone` to setup a newly-created submodule. + pub fn clone( + &mut self, + opts: Option<&mut SubmoduleUpdateOptions<'_>>, + ) -> Result { + unsafe { + let raw_opts = opts.map(|o| o.raw()); + let mut raw_repo = ptr::null_mut(); + try_call!(raw::git_submodule_clone( + &mut raw_repo, + self.raw, + raw_opts.as_ref() + )); + Ok(Binding::from_raw(raw_repo)) + } + } + /// Get the submodule's url. /// /// Returns `None` if the url is not valid utf-8 or if the URL isn't present @@ -360,4 +379,26 @@ t!(submodule.update(init, opts)); } } + + #[test] + fn clone_submodule() { + // ----------------------------------- + // Same as `add_a_submodule()` + let (_td, repo1) = crate::test::repo_init(); + let (_td, repo2) = crate::test::repo_init(); + let (_td, parent) = crate::test::repo_init(); + + let url1 = Url::from_file_path(&repo1.workdir().unwrap()).unwrap(); + let url3 = Url::from_file_path(&repo2.workdir().unwrap()).unwrap(); + let mut s1 = parent + .submodule(&url1.to_string(), Path::new("bar"), true) + .unwrap(); + let mut s2 = parent + .submodule(&url3.to_string(), Path::new("bar2"), true) + .unwrap(); + // ----------------------------------- + + t!(s1.clone(Some(&mut SubmoduleUpdateOptions::default()))); + t!(s2.clone(None)); + } } diff -Nru cargo-0.44.1/vendor/git2/src/tagforeach.rs cargo-0.47.0/vendor/git2/src/tagforeach.rs --- cargo-0.44.1/vendor/git2/src/tagforeach.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/tagforeach.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,69 @@ +//! git_tag_foreach support +//! 
see original: https://libgit2.org/libgit2/#HEAD/group/tag/git_tag_foreach + +use crate::{panic, raw, util::Binding, Oid}; +use libc::{c_char, c_int}; +use raw::git_oid; +use std::ffi::{c_void, CStr}; + +/// boxed callback type +pub(crate) type TagForeachCB<'a> = Box bool + 'a>; + +/// helper type to be able to pass callback to payload +pub(crate) struct TagForeachData<'a> { + /// callback + pub(crate) cb: TagForeachCB<'a>, +} + +/// c callback forwarding to rust callback inside `TagForeachData` +/// see original: https://libgit2.org/libgit2/#HEAD/group/callback/git_tag_foreach_cb +pub(crate) extern "C" fn tag_foreach_cb( + name: *const c_char, + oid: *mut git_oid, + payload: *mut c_void, +) -> c_int { + panic::wrap(|| unsafe { + let id: Oid = Binding::from_raw(oid as *const _); + + let name = CStr::from_ptr(name); + let name = name.to_bytes(); + + let payload = &mut *(payload as *mut TagForeachData<'_>); + let cb = &mut payload.cb; + + let res = cb(id, name); + + if res { + 0 + } else { + -1 + } + }) + .unwrap_or(-1) +} + +#[cfg(test)] +mod tests { + + #[test] + fn smoke() { + let (_td, repo) = crate::test::repo_init(); + let head = repo.head().unwrap(); + let id = head.target().unwrap(); + assert!(repo.find_tag(id).is_err()); + + let obj = repo.find_object(id, None).unwrap(); + let sig = repo.signature().unwrap(); + let tag_id = repo.tag("foo", &obj, &sig, "msg", false).unwrap(); + + let mut tags = Vec::new(); + repo.tag_foreach(|id, name| { + tags.push((id, String::from_utf8(name.into()).unwrap())); + true + }) + .unwrap(); + + assert_eq!(tags[0].0, tag_id); + assert_eq!(tags[0].1, "refs/tags/foo"); + } +} diff -Nru cargo-0.44.1/vendor/git2/src/transport.rs cargo-0.47.0/vendor/git2/src/transport.rs --- cargo-0.44.1/vendor/git2/src/transport.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2/src/transport.rs 2020-10-01 21:38:28.000000000 +0000 @@ -249,7 +249,10 @@ let transport = &mut *(raw_transport as *mut RawSmartSubtransport); let obj = match transport.obj.action(url, action) { Ok(s) => s, - Err(e) => return e.raw_code() as c_int, + Err(e) => { + set_err(&e); + return e.raw_code() as c_int; + } }; *stream = mem::transmute(Box::new(RawSmartSubtransportStream { raw: raw::git_smart_subtransport_stream { @@ -309,7 +312,7 @@ match ret { Some(Ok(_)) => 0, Some(Err(e)) => unsafe { - set_err(&e); + set_err_io(&e); -2 }, None => -1, @@ -331,18 +334,23 @@ match ret { Some(Ok(())) => 0, Some(Err(e)) => unsafe { - set_err(&e); + set_err_io(&e); -2 }, None => -1, } } -unsafe fn set_err(e: &io::Error) { +unsafe fn set_err_io(e: &io::Error) { let s = CString::new(e.to_string()).unwrap(); raw::git_error_set_str(raw::GIT_ERROR_NET as c_int, s.as_ptr()); } +unsafe fn set_err(e: &Error) { + let s = CString::new(e.message()).unwrap(); + raw::git_error_set_str(e.raw_class() as c_int, s.as_ptr()); +} + // callback used by smart transports to free a `SmartSubtransportStream` // object. 
extern "C" fn stream_free(stream: *mut raw::git_smart_subtransport_stream) { @@ -350,3 +358,55 @@ mem::transmute::<_, Box>(stream); }); } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ErrorClass, ErrorCode}; + use std::sync::Once; + + struct DummyTransport; + + // in lieu of lazy_static + fn dummy_error() -> Error { + Error::new(ErrorCode::Ambiguous, ErrorClass::Net, "bleh") + } + + impl SmartSubtransport for DummyTransport { + fn action( + &self, + _url: &str, + _service: Service, + ) -> Result, Error> { + Err(dummy_error()) + } + + fn close(&self) -> Result<(), Error> { + Ok(()) + } + } + + #[test] + fn transport_error_propagates() { + static INIT: Once = Once::new(); + + unsafe { + INIT.call_once(|| { + register("dummy", move |remote| { + Transport::smart(&remote, true, DummyTransport) + }) + .unwrap(); + }) + } + + let (_td, repo) = crate::test::repo_init(); + t!(repo.remote("origin", "dummy://ball")); + + let mut origin = t!(repo.find_remote("origin")); + + match origin.fetch(&["master"], None, None) { + Ok(()) => unreachable!(), + Err(e) => assert_eq!(e, dummy_error()), + } + } +} diff -Nru cargo-0.44.1/vendor/git2-curl/.cargo-checksum.json cargo-0.47.0/vendor/git2-curl/.cargo-checksum.json --- cargo-0.44.1/vendor/git2-curl/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2-curl/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"502d532a2d06184beb3bc869d4d90236e60934e3382c921b203fa3c33e212bd7"} \ No newline at end of file +{"files":{},"package":"883539cb0ea94bab3f8371a98cd8e937bbe9ee7c044499184aa4c17deb643a50"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/git2-curl/Cargo.toml cargo-0.47.0/vendor/git2-curl/Cargo.toml --- cargo-0.44.1/vendor/git2-curl/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2-curl/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "git2-curl" -version = "0.14.0" +version = "0.14.1" authors = ["Josh Triplett ", "Alex Crichton "] description = "Backend for an HTTP transport in libgit2 powered by libcurl.\n\nIntended to be used with the git2 crate.\n" documentation = "https://docs.rs/git2-curl" @@ -24,7 +24,7 @@ name = "all" harness = false [dependencies.curl] -version = "0.4" +version = "0.4.33" [dependencies.git2] version = "0.13" @@ -46,3 +46,6 @@ [dev-dependencies.tempfile] version = "3.0" + +[features] +zlib-ng-compat = ["git2/zlib-ng-compat", "curl/zlib-ng-compat"] diff -Nru cargo-0.44.1/vendor/git2-curl/src/lib.rs cargo-0.47.0/vendor/git2-curl/src/lib.rs --- cargo-0.44.1/vendor/git2-curl/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/git2-curl/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -15,7 +15,7 @@ //! > **NOTE**: At this time this crate likely does not support a `git push` //! > operation, only clones. 
-#![doc(html_root_url = "https://docs.rs/git2-curl/0.11")] +#![doc(html_root_url = "https://docs.rs/git2-curl/0.14")] #![deny(missing_docs)] #![warn(rust_2018_idioms)] #![cfg_attr(test, deny(warnings))] diff -Nru cargo-0.44.1/vendor/globset/Cargo.toml cargo-0.47.0/vendor/globset/Cargo.toml --- cargo-0.44.1/vendor/globset/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/globset/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -42,9 +42,9 @@ [dependencies.regex] version = "1.1.5" -[dependencies.serde] -version = "1.0.104" -optional = true +#[dependencies.serde] +#version = "1.0.104" +#optional = true [dev-dependencies.glob] version = "0.3.0" @@ -55,5 +55,5 @@ version = "1.0.41" [features] -serde1 = ["serde"] +#serde1 = ["serde"] simd-accel = [] diff -Nru cargo-0.44.1/vendor/globset/debian/patches/disable-feature-serde.diff cargo-0.47.0/vendor/globset/debian/patches/disable-feature-serde.diff --- cargo-0.44.1/vendor/globset/debian/patches/disable-feature-serde.diff 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/globset/debian/patches/disable-feature-serde.diff 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,24 @@ +Index: globset/Cargo.toml +=================================================================== +--- globset.orig/Cargo.toml ++++ globset/Cargo.toml +@@ -42,9 +42,9 @@ version = "0.4.5" + [dependencies.regex] + version = "1.1.5" + +-[dependencies.serde] +-version = "1.0.104" +-optional = true ++#[dependencies.serde] ++#version = "1.0.104" ++#optional = true + [dev-dependencies.glob] + version = "0.3.0" + +@@ -55,5 +55,5 @@ version = "1" + version = "1.0.41" + + [features] +-serde1 = ["serde"] ++#serde1 = ["serde"] + simd-accel = [] diff -Nru cargo-0.44.1/vendor/globset/debian/patches/series cargo-0.47.0/vendor/globset/debian/patches/series --- cargo-0.44.1/vendor/globset/debian/patches/series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/globset/debian/patches/series 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1,2 @@ relax-test-dep.diff +disable-feature-serde.diff diff -Nru cargo-0.44.1/vendor/globset/.pc/applied-patches cargo-0.47.0/vendor/globset/.pc/applied-patches --- cargo-0.44.1/vendor/globset/.pc/applied-patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/globset/.pc/applied-patches 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1,2 @@ relax-test-dep.diff +disable-feature-serde.diff diff -Nru cargo-0.44.1/vendor/globset/.pc/disable-feature-serde.diff/Cargo.toml cargo-0.47.0/vendor/globset/.pc/disable-feature-serde.diff/Cargo.toml --- cargo-0.44.1/vendor/globset/.pc/disable-feature-serde.diff/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/globset/.pc/disable-feature-serde.diff/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,59 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "globset" +version = "0.4.5" +authors = ["Andrew Gallant "] +description = "Cross platform single glob and glob set matching. 
Glob set matching is the\nprocess of matching one or more glob patterns against a single candidate path\nsimultaneously, and returning all of the globs that matched.\n" +homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset" +documentation = "https://docs.rs/globset" +readme = "README.md" +keywords = ["regex", "glob", "multiple", "set", "pattern"] +license = "Unlicense/MIT" +repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset" + +[lib] +name = "globset" +bench = false +[dependencies.aho-corasick] +version = "0.7.3" + +[dependencies.bstr] +version = "0.2.0" +features = ["std"] +default-features = false + +[dependencies.fnv] +version = "1.0.6" + +[dependencies.log] +version = "0.4.5" + +[dependencies.regex] +version = "1.1.5" + +[dependencies.serde] +version = "1.0.104" +optional = true +[dev-dependencies.glob] +version = "0.3.0" + +[dev-dependencies.lazy_static] +version = "1" + +[dev-dependencies.serde_json] +version = "1.0.41" + +[features] +serde1 = ["serde"] +simd-accel = [] diff -Nru cargo-0.44.1/vendor/humantime/.cargo-checksum.json cargo-0.47.0/vendor/humantime/.cargo-checksum.json --- cargo-0.44.1/vendor/humantime/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/humantime/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"b9b6c53306532d3c8e8087b44e6580e10db51a023cf9b433cea2ac38066b92da"} \ No newline at end of file +{"files":{},"package":"3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/humantime/Cargo.toml cargo-0.47.0/vendor/humantime/Cargo.toml --- cargo-0.44.1/vendor/humantime/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/humantime/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "humantime" -version = "2.0.0" +version = "2.0.1" authors = ["Paul Colomiets "] description = " A parser and formatter for std::time::{Duration, SystemTime}\n" homepage = "https://github.com/tailhook/humantime" @@ -22,6 +22,7 @@ keywords = ["time", "human", "human-friendly", "parser", "duration"] categories = ["date-and-time"] license = "MIT/Apache-2.0" +repository = "https://github.com/tailhook/humantime" [lib] name = "humantime" diff -Nru cargo-0.44.1/vendor/humantime/src/duration.rs cargo-0.47.0/vendor/humantime/src/duration.rs --- cargo-0.44.1/vendor/humantime/src/duration.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/humantime/src/duration.rs 2020-10-01 21:38:28.000000000 +0000 @@ -207,7 +207,7 @@ /// The duration object is a concatenation of time spans. Where each time /// span is an integer number and a suffix. 
Supported suffixes: /// -/// * `nsec`, `ns` -- microseconds +/// * `nsec`, `ns` -- nanoseconds /// * `usec`, `us` -- microseconds /// * `msec`, `ms` -- milliseconds /// * `seconds`, `second`, `sec`, `s` diff -Nru cargo-0.44.1/vendor/ignore/.cargo-checksum.json cargo-0.47.0/vendor/ignore/.cargo-checksum.json --- cargo-0.44.1/vendor/ignore/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ignore/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"128b9e89d15a3faa642ee164c998fd4fae3d89d054463cddb2c25a7baad3a352"} \ No newline at end of file +{"files":{},"package":"22dcbf2a4a289528dbef21686354904e1c694ac642610a9bff9e7df730d9ec72"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/ignore/Cargo.lock cargo-0.47.0/vendor/ignore/Cargo.lock --- cargo-0.44.1/vendor/ignore/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ignore/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -17,9 +17,9 @@ [[package]] name = "bstr" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "memchr", ] @@ -53,9 +53,9 @@ [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "globset" @@ -72,7 +72,7 @@ [[package]] name = "ignore" -version = "0.4.15" +version = "0.4.16" dependencies = [ "crossbeam-channel", "crossbeam-utils", @@ -116,9 +116,9 @@ [[package]] name = "regex" -version = "1.3.7" +version = "1.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" dependencies = [ "aho-corasick", "memchr", @@ -128,9 +128,9 @@ [[package]] name = "regex-syntax" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" [[package]] name = "same-file" diff -Nru cargo-0.44.1/vendor/ignore/Cargo.toml cargo-0.47.0/vendor/ignore/Cargo.toml --- cargo-0.44.1/vendor/ignore/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ignore/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "ignore" -version = "0.4.15" +version = "0.4.16" authors = ["Andrew Gallant "] description = "A fast library for efficiently matching ignore files such as `.gitignore`\nagainst file paths.\n" homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore" @@ -29,7 +29,7 @@ version = "0.7.0" [dependencies.globset] -version = "0.4.3" +version = "0.4.5" [dependencies.lazy_static] version = "1.1" diff -Nru cargo-0.44.1/vendor/ignore/src/default_types.rs cargo-0.47.0/vendor/ignore/src/default_types.rs --- cargo-0.44.1/vendor/ignore/src/default_types.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ignore/src/default_types.rs 2020-10-01 21:38:28.000000000 +0000 @@ -135,6 +135,7 @@ ("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), ("matlab", 
&["*.m"]), ("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]), + ("meson", &["meson.build", "meson_options.txt"]), ("mk", &["mkfile"]), ("ml", &["*.ml"]), ("msbuild", &[ diff -Nru cargo-0.44.1/vendor/im-rc/.cargo-checksum.json cargo-0.47.0/vendor/im-rc/.cargo-checksum.json --- cargo-0.44.1/vendor/im-rc/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"303f7e6256d546e01979071417432425f15c1891fb309a5f2d724ee908fabd6e"} \ No newline at end of file +{"files":{},"package":"3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/im-rc/Cargo.toml cargo-0.47.0/vendor/im-rc/Cargo.toml --- cargo-0.44.1/vendor/im-rc/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "im-rc" -version = "14.3.0" +version = "15.0.0" authors = ["Bodil Stokke "] build = "./build.rs" description = "Immutable collection datatypes (the fast but not thread safe version)" @@ -34,7 +34,7 @@ optional = true [dependencies.bitmaps] -version = "2.0.0" +version = "2" [dependencies.proptest] version = "0.9" @@ -48,27 +48,27 @@ version = "0.5.1" [dependencies.rand_xoshiro] -version = "0.4.0" +version = "0.4" [dependencies.rayon] -version = "1.0" +version = "1" optional = true [dependencies.refpool] -version = "0.2.2" +version = "0.4" optional = true [dependencies.serde] -version = "1.0" +version = "1" optional = true [dependencies.sized-chunks] -version = ">= 0.5.1, < 0.7" +version = "0.6" [dependencies.typenum] -version = "1.10" +version = "1.12" [dev-dependencies.metrohash] -version = "1.0.6" +version = "1" [dev-dependencies.pretty_assertions] version = "0.6" @@ -77,21 +77,25 @@ version = "0.9" [dev-dependencies.proptest-derive] -version = "0.1.0" +version = "0.1" [dev-dependencies.rand] version = "0.7" features = ["small_rng"] [dev-dependencies.rayon] -version = "1.0" +version = "1" [dev-dependencies.serde] -version = "1.0" +version = "1" [dev-dependencies.serde_json] -version = "1.0" +version = "1" [build-dependencies.version_check] version = "0.9" + +[features] +debug = [] +pool = ["refpool", "sized-chunks/refpool"] [badges.travis-ci] repository = "bodil/im-rs" diff -Nru cargo-0.44.1/vendor/im-rc/CHANGELOG.md cargo-0.47.0/vendor/im-rc/CHANGELOG.md --- cargo-0.44.1/vendor/im-rc/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -5,6 +5,33 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [15.0.0] - 2020-05-15 + +### Changed + +- Map iterators now return `(&K, &V)` and `(&K, &mut V)` respectively, to be consistent with + `std::collections`'s API. `DiffIter` for `OrdMap` has also changed in the same manner. (#121) + +### Removed + +- The `pool` feature flag has been removed from the `im` version of the crate, as `refpool` no + longer supports threadsafe pools. +- `HashSet::iter_mut()` has been removed, because if you modify the hashed values in a hash set, + you break the hash set. + +### Added + +- The `pool` feature flag was missing from the `im-rc` version of the crate, which is the version + where it's actually useful. It's been added now. +- `DiffIter` now has a `Debug` implementation. 
+- There is now a `Vector::is_inline()` method to determine whether a `Vector` is currently + inlined. (#129) + +### Fixed + +- A smarter implementation of the sorting algorithm for `Vector` has improved the performance of + `Vector::sort` by approximately 2x. (#126) + ## [14.3.0] - 2020-03-03 ### Changed diff -Nru cargo-0.44.1/vendor/im-rc/debian/patches/relax-deps.patch cargo-0.47.0/vendor/im-rc/debian/patches/relax-deps.patch --- cargo-0.44.1/vendor/im-rc/debian/patches/relax-deps.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/debian/patches/relax-deps.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -Index: im-rc/Cargo.toml -=================================================================== ---- im-rc.orig/Cargo.toml -+++ im-rc/Cargo.toml -@@ -63,7 +63,7 @@ version = "1.0" - optional = true - - [dependencies.sized-chunks] --version = "0.5.1" -+version = ">= 0.5.1, < 0.7" - - [dependencies.typenum] - version = "1.10" diff -Nru cargo-0.44.1/vendor/im-rc/debian/patches/series cargo-0.47.0/vendor/im-rc/debian/patches/series --- cargo-0.44.1/vendor/im-rc/debian/patches/series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -relax-deps.patch diff -Nru cargo-0.44.1/vendor/im-rc/.pc/applied-patches cargo-0.47.0/vendor/im-rc/.pc/applied-patches --- cargo-0.44.1/vendor/im-rc/.pc/applied-patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/.pc/applied-patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -relax-deps.patch diff -Nru cargo-0.44.1/vendor/im-rc/.pc/.quilt_patches cargo-0.47.0/vendor/im-rc/.pc/.quilt_patches --- cargo-0.44.1/vendor/im-rc/.pc/.quilt_patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/.pc/.quilt_patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -debian/patches diff -Nru cargo-0.44.1/vendor/im-rc/.pc/.quilt_series cargo-0.47.0/vendor/im-rc/.pc/.quilt_series --- cargo-0.44.1/vendor/im-rc/.pc/.quilt_series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/.pc/.quilt_series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -series diff -Nru cargo-0.44.1/vendor/im-rc/.pc/relax-deps.patch/Cargo.toml cargo-0.47.0/vendor/im-rc/.pc/relax-deps.patch/Cargo.toml --- cargo-0.44.1/vendor/im-rc/.pc/relax-deps.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/.pc/relax-deps.patch/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "im-rc" -version = "14.3.0" -authors = ["Bodil Stokke "] -build = "./build.rs" -description = "Immutable collection datatypes (the fast but not thread safe version)" -homepage = "http://immutable.rs/" -documentation = "http://immutable.rs/" -readme = "../../README.md" -keywords = ["immutable", "persistent", "hamt", "b-tree", "rrb-tree"] -categories = ["data-structures"] -license = "MPL-2.0+" -repository = "https://github.com/bodil/im-rs" -[package.metadata.docs.rs] -all-features = true - -[lib] -path = "./src/lib.rs" -[dependencies.arbitrary] -version = "0.4" -optional = true - -[dependencies.bitmaps] -version = "2.0.0" - -[dependencies.proptest] -version = "0.9" -optional = true - -[dependencies.quickcheck] -version = "0.9" -optional = true - -[dependencies.rand_core] -version = "0.5.1" - -[dependencies.rand_xoshiro] -version = "0.4.0" - -[dependencies.rayon] -version = "1.0" -optional = true - -[dependencies.refpool] -version = "0.2.2" -optional = true - -[dependencies.serde] -version = "1.0" -optional = true - -[dependencies.sized-chunks] -version = "0.5.1" - -[dependencies.typenum] -version = "1.10" -[dev-dependencies.metrohash] -version = "1.0.6" - -[dev-dependencies.pretty_assertions] -version = "0.6" - -[dev-dependencies.proptest] -version = "0.9" - -[dev-dependencies.proptest-derive] -version = "0.1.0" - -[dev-dependencies.rand] -version = "0.7" -features = ["small_rng"] - -[dev-dependencies.rayon] -version = "1.0" - -[dev-dependencies.serde] -version = "1.0" - -[dev-dependencies.serde_json] -version = "1.0" -[build-dependencies.version_check] -version = "0.9" -[badges.travis-ci] -repository = "bodil/im-rs" diff -Nru cargo-0.44.1/vendor/im-rc/.pc/.version cargo-0.47.0/vendor/im-rc/.pc/.version --- cargo-0.44.1/vendor/im-rc/.pc/.version 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/.pc/.version 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2 diff -Nru cargo-0.44.1/vendor/im-rc/src/hash/map.rs cargo-0.47.0/vendor/im-rc/src/hash/map.rs --- cargo-0.44.1/vendor/im-rc/src/hash/map.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/hash/map.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1600,9 +1600,7 @@ if Ref::ptr_eq(&self.hasher, &other.hasher) { return self.iter().partial_cmp(other.iter()); } - let m1: ::std::collections::HashMap = self.iter().cloned().collect(); - let m2: ::std::collections::HashMap = other.iter().cloned().collect(); - m1.iter().partial_cmp(m2.iter()) + self.iter().partial_cmp(other.iter()) } } @@ -1616,9 +1614,7 @@ if Ref::ptr_eq(&self.hasher, &other.hasher) { return self.iter().cmp(other.iter()); } - let m1: ::std::collections::HashMap = self.iter().cloned().collect(); - let m2: ::std::collections::HashMap = other.iter().cloned().collect(); - m1.iter().cmp(m2.iter()) + self.iter().cmp(other.iter()) } } @@ -1801,10 +1797,10 @@ } impl<'a, K, V> Iterator for Iter<'a, K, V> { - type Item = &'a (K, V); + type Item = (&'a K, &'a V); fn next(&mut self) -> Option { - self.it.next().map(|(p, _)| p) + self.it.next().map(|((k, v), _)| (k, v)) } fn size_hint(&self) -> (usize, Option) { @@ -1830,10 +1826,10 @@ K: Clone, V: Clone, { - type Item = &'a mut V; + type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option { - self.it.next().map(|(entry, _)| &mut entry.1) + self.it.next().map(|((k, v), _)| (&*k, v)) } fn size_hint(&self) -> (usize, Option) { @@ -1926,7 
+1922,7 @@ K: Hash + Eq, S: BuildHasher, { - type Item = &'a (K, V); + type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; #[inline] diff -Nru cargo-0.44.1/vendor/im-rc/src/hash/set.rs cargo-0.47.0/vendor/im-rc/src/hash/set.rs --- cargo-0.44.1/vendor/im-rc/src/hash/set.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/hash/set.rs 2020-10-01 21:38:28.000000000 +0000 @@ -31,9 +31,7 @@ use std::iter::{FromIterator, IntoIterator, Sum}; use std::ops::{Add, Deref, Mul}; -use crate::nodes::hamt::{ - hash_key, Drain as NodeDrain, HashValue, Iter as NodeIter, IterMut as NodeIterMut, Node, -}; +use crate::nodes::hamt::{hash_key, Drain as NodeDrain, HashValue, Iter as NodeIter, Node}; use crate::ordset::OrdSet; use crate::util::{Pool, PoolRef, Ref}; @@ -395,20 +393,6 @@ A: Hash + Eq + Clone, S: BuildHasher, { - /// Get a mutable iterator over the values in a hash set. - /// - /// Please note that the order is consistent between sets using the same - /// hasher, but no other ordering guarantee is offered. Items will not come - /// out in insertion order or sort order. They will, however, come out in - /// the same order every time for the same set. - #[must_use] - pub fn iter_mut(&mut self) -> IterMut<'_, A> { - let root = PoolRef::make_mut(&self.pool.0, &mut self.root); - IterMut { - it: NodeIterMut::new(&self.pool.0, root, self.size), - } - } - /// Insert a value into a set. /// /// Time: O(log n) @@ -689,9 +673,7 @@ if Ref::ptr_eq(&self.hasher, &other.hasher) { return self.iter().partial_cmp(other.iter()); } - let m1: ::std::collections::HashSet = self.iter().cloned().collect(); - let m2: ::std::collections::HashSet = other.iter().cloned().collect(); - m1.iter().partial_cmp(m2.iter()) + self.iter().partial_cmp(other.iter()) } } @@ -704,9 +686,7 @@ if Ref::ptr_eq(&self.hasher, &other.hasher) { return self.iter().cmp(other.iter()); } - let m1: ::std::collections::HashSet = self.iter().cloned().collect(); - let m2: ::std::collections::HashSet = other.iter().cloned().collect(); - m1.iter().cmp(m2.iter()) + self.iter().cmp(other.iter()) } } @@ -876,30 +856,6 @@ impl<'a, A> FusedIterator for Iter<'a, A> {} -/// A mutable iterator over the elements of a set. -pub struct IterMut<'a, A> { - it: NodeIterMut<'a, Value>, -} - -impl<'a, A> Iterator for IterMut<'a, A> -where - A: 'a + Clone, -{ - type Item = &'a mut A; - - fn next(&mut self) -> Option { - self.it.next().map(|(v, _)| &mut v.0) - } - - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, A> ExactSizeIterator for IterMut<'a, A> where A: Clone {} - -impl<'a, A> FusedIterator for IterMut<'a, A> where A: Clone {} - /// A consuming iterator over the elements of a set. pub struct ConsumingIter where diff -Nru cargo-0.44.1/vendor/im-rc/src/lib.rs cargo-0.47.0/vendor/im-rc/src/lib.rs --- cargo-0.44.1/vendor/im-rc/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -303,7 +303,7 @@ //! //! | Feature | Description | //! | ------- | ----------- | -//! | [`pool`](https://crates.io/crates/refpool) | Constructors and pool types for [`refpool`](https://crates.io/crates/refpool) memory pools (recommended only for `im-rc`) | +//! | [`pool`](https://crates.io/crates/refpool) | Constructors and pool types for [`refpool`](https://crates.io/crates/refpool) memory pools (only available in `im-rc`) | //! | [`proptest`](https://crates.io/crates/proptest) | Strategies for all `im` datatypes under a `proptest` namespace, eg. 
`im::vector::proptest::vector()` | //! | [`quickcheck`](https://crates.io/crates/quickcheck) | [`quickcheck::Arbitrary`](https://docs.rs/quickcheck/latest/quickcheck/trait.Arbitrary.html) implementations for all `im` datatypes (not available in `im-rc`) | //! | [`rayon`](https://crates.io/crates/rayon) | parallel iterator implementations for [`Vector`][vector::Vector] (not available in `im-rc`) | @@ -324,11 +324,11 @@ //! [std::hash::Hash]: https://doc.rust-lang.org/std/hash/trait.Hash.html //! [std::marker::Send]: https://doc.rust-lang.org/std/marker/trait.Send.html //! [std::marker::Sync]: https://doc.rust-lang.org/std/marker/trait.Sync.html -//! [hashmap::HashMap]: ./hashmap/struct.HashMap.html -//! [hashset::HashSet]: ./hashset/struct.HashSet.html -//! [ordmap::OrdMap]: ./ordmap/struct.OrdMap.html -//! [ordset::OrdSet]: ./ordset/struct.OrdSet.html -//! [vector::Vector]: ./vector/enum.Vector.html +//! [hashmap::HashMap]: ./struct.HashMap.html +//! [hashset::HashSet]: ./struct.HashSet.html +//! [ordmap::OrdMap]: ./struct.OrdMap.html +//! [ordset::OrdSet]: ./struct.OrdSet.html +//! [vector::Vector]: ./struct.Vector.html //! [vector::Vector::push_back]: ./vector/enum.Vector.html#method.push_back //! [rrb-tree]: https://infoscience.epfl.ch/record/213452/files/rrbvector.pdf //! [hamt]: https://en.wikipedia.org/wiki/Hash_array_mapped_trie @@ -382,9 +382,14 @@ #[doc(hidden)] pub mod quickcheck; -#[cfg(not(feature = "pool"))] +#[cfg(any(threadsafe, not(feature = "pool")))] mod fakepool; +#[cfg(all(threadsafe, feature = "pool"))] +compile_error!( + "The `pool` feature is not threadsafe but you've enabled it on a threadsafe version of `im`." +); + pub use crate::hashmap::HashMap; pub use crate::hashset::HashSet; pub use crate::ordmap::OrdMap; diff -Nru cargo-0.44.1/vendor/im-rc/src/nodes/btree.rs cargo-0.47.0/vendor/im-rc/src/nodes/btree.rs --- cargo-0.44.1/vendor/im-rc/src/nodes/btree.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/nodes/btree.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1227,7 +1227,7 @@ } /// A description of a difference between two ordered sets. -#[derive(PartialEq, Eq)] +#[derive(PartialEq, Eq, Debug)] pub enum DiffItem<'a, A> { /// This value has been added to the new set. Add(&'a A), diff -Nru cargo-0.44.1/vendor/im-rc/src/nodes/rrb.rs cargo-0.47.0/vendor/im-rc/src/nodes/rrb.rs --- cargo-0.44.1/vendor/im-rc/src/nodes/rrb.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/nodes/rrb.rs 2020-10-01 21:38:28.000000000 +0000 @@ -35,7 +35,7 @@ fn size(&self) -> usize { match self { Size::Size(s) => *s, - Size::Table(sizes) => sizes.iter().sum(), + Size::Table(sizes) => *sizes.last().unwrap_or(&0), } } @@ -49,11 +49,12 @@ fn table_from_size(pool: &Pool>, level: usize, size: usize) -> Self { let mut chunk = Chunk::new(); let mut remaining = size; - let child_size = NODE_SIZE.pow(level as u32); - while remaining > child_size { - let next_value = chunk.last().unwrap_or(&0) + child_size; - chunk.push_back(next_value); - remaining -= child_size; + if let Some(child_size) = NODE_SIZE.checked_pow(level as u32) { + while remaining > child_size { + let next_value = chunk.last().unwrap_or(&0) + child_size; + chunk.push_back(next_value); + remaining -= child_size; + } } if remaining > 0 { let next_value = chunk.last().unwrap_or(&0) + remaining; @@ -395,7 +396,13 @@ fn is_completely_dense(&self, level: usize) -> bool { // Size of a full node is NODE_SIZE at level 0, NODE_SIZE² at // level 1, etc. 
- self.size() == NODE_SIZE.pow(level as u32 + 1) + if let Some(expected_size) = NODE_SIZE.checked_pow(level as u32 + 1) { + self.size() == expected_size + } else { + // We overflowed a usize, there's no way we can be completely dense as we know the size + // fits in a usize. + false + } } #[inline] @@ -444,7 +451,11 @@ } fn index_in(&self, level: usize, index: usize) -> Option { - let mut target_idx = index / NODE_SIZE.pow(level as u32); + let mut target_idx = if let Some(child_size) = NODE_SIZE.checked_pow(level as u32) { + index / child_size + } else { + 0 + }; if target_idx >= self.children.len() { return None; } diff -Nru cargo-0.44.1/vendor/im-rc/src/ord/map.rs cargo-0.47.0/vendor/im-rc/src/ord/map.rs --- cargo-0.44.1/vendor/im-rc/src/ord/map.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/ord/map.rs 2020-10-01 21:38:28.000000000 +0000 @@ -32,7 +32,9 @@ use crate::util::linear_search_by; use crate::util::{Pool, PoolRef}; -pub use crate::nodes::btree::{ConsumingIter, DiffItem, DiffIter, Iter as RangedIter}; +pub use crate::nodes::btree::{ + ConsumingIter, DiffItem as NodeDiffItem, DiffIter as NodeDiffIter, Iter as RangedIter, +}; /// Construct a map from a sequence of key/value pairs. /// @@ -354,7 +356,7 @@ /// Get an iterator over the key/value pairs of a map. #[must_use] - pub fn iter(&self) -> Iter<'_, (K, V)> { + pub fn iter(&self) -> Iter<'_, K, V> { Iter { it: RangedIter::new(&self.root, self.size, ..), } @@ -362,13 +364,15 @@ /// Create an iterator over a range of key/value pairs. #[must_use] - pub fn range(&self, range: R) -> RangedIter<'_, (K, V)> + pub fn range(&self, range: R) -> Iter<'_, K, V> where R: RangeBounds, K: Borrow, BK: Ord + ?Sized, { - RangedIter::new(&self.root, self.size, range) + Iter { + it: RangedIter::new(&self.root, self.size, range), + } } /// Get an iterator over a map's keys. @@ -395,8 +399,10 @@ /// the two maps, minus the number of elements belonging to nodes /// shared between them) #[must_use] - pub fn diff<'a>(&'a self, other: &'a Self) -> DiffIter<'a, (K, V)> { - DiffIter::new(&self.root, &other.root) + pub fn diff<'a>(&'a self, other: &'a Self) -> DiffIter<'a, K, V> { + DiffIter { + it: NodeDiffIter::new(&self.root, &other.root), + } } /// Get the value for a key from a map. @@ -1398,14 +1404,20 @@ /// map. #[must_use] pub fn take(&self, n: usize) -> Self { - self.iter().take(n).cloned().collect() + self.iter() + .take(n) + .map(|(k, v)| (k.clone(), v.clone())) + .collect() } /// Construct a map with the `n` smallest keys removed from a /// given map. #[must_use] pub fn skip(&self, n: usize) -> Self { - self.iter().skip(n).cloned().collect() + self.iter() + .skip(n) + .map(|(k, v)| (k.clone(), v.clone())) + .collect() } /// Remove the smallest key from a map, and return its value as @@ -1809,7 +1821,7 @@ { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { let mut d = f.debug_map(); - for (k, v) in self { + for (k, v) in self.iter() { d.entry(k, v); } d.finish() @@ -1819,18 +1831,18 @@ // Iterators /// An iterator over the key/value pairs of a map. 
-pub struct Iter<'a, A> { - it: RangedIter<'a, A>, +pub struct Iter<'a, K, V> { + it: RangedIter<'a, (K, V)>, } -impl<'a, A> Iterator for Iter<'a, A> +impl<'a, K, V> Iterator for Iter<'a, K, V> where - A: 'a + BTreeValue, + (K, V): 'a + BTreeValue, { - type Item = &'a A; + type Item = (&'a K, &'a V); fn next(&mut self) -> Option { - self.it.next() + self.it.next().map(|(k, v)| (k, v)) } fn size_hint(&self) -> (usize, Option) { @@ -1838,20 +1850,62 @@ } } -impl<'a, A> DoubleEndedIterator for Iter<'a, A> +impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> where - A: 'a + BTreeValue, + (K, V): 'a + BTreeValue, { fn next_back(&mut self) -> Option { - self.it.next_back() + self.it.next_back().map(|(k, v)| (k, v)) } } -impl<'a, A> ExactSizeIterator for Iter<'a, A> where A: 'a + BTreeValue {} +impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> where (K, V): 'a + BTreeValue {} + +/// An iterator over the differences between two maps. +pub struct DiffIter<'a, K, V> { + it: NodeDiffIter<'a, (K, V)>, +} + +/// A description of a difference between two ordered maps. +#[derive(PartialEq, Eq, Debug)] +pub enum DiffItem<'a, K, V> { + /// This value has been added to the new map. + Add(&'a K, &'a V), + /// This value has been changed between the two maps. + Update { + /// The old value. + old: (&'a K, &'a V), + /// The new value. + new: (&'a K, &'a V), + }, + /// This value has been removed from the new map. + Remove(&'a K, &'a V), +} + +impl<'a, K, V> Iterator for DiffIter<'a, K, V> +where + (K, V): 'a + BTreeValue + PartialEq, +{ + type Item = DiffItem<'a, K, V>; + + fn next(&mut self) -> Option { + self.it.next().map(|item| match item { + NodeDiffItem::Add((k, v)) => DiffItem::Add(k, v), + NodeDiffItem::Update { + old: (oldk, oldv), + new: (newk, newv), + } => DiffItem::Update { + old: (oldk, oldv), + new: (newk, newv), + }, + NodeDiffItem::Remove((k, v)) => DiffItem::Remove(k, v), + }) + } +} /// An iterator ove the keys of a map. pub struct Keys<'a, K, V> { - it: Iter<'a, (K, V)>, + it: Iter<'a, K, V>, } impl<'a, K, V> Iterator for Keys<'a, K, V> @@ -1862,10 +1916,7 @@ type Item = &'a K; fn next(&mut self) -> Option { - match self.it.next() { - None => None, - Some((k, _)) => Some(k), - } + self.it.next().map(|(k, _)| k) } fn size_hint(&self) -> (usize, Option) { @@ -1895,7 +1946,7 @@ /// An iterator over the values of a map. 
pub struct Values<'a, K, V> { - it: Iter<'a, (K, V)>, + it: Iter<'a, K, V>, } impl<'a, K, V> Iterator for Values<'a, K, V> @@ -1906,10 +1957,7 @@ type Item = &'a V; fn next(&mut self) -> Option { - match self.it.next() { - None => None, - Some((_, v)) => Some(v), - } + self.it.next().map(|(_, v)| v) } fn size_hint(&self) -> (usize, Option) { @@ -1958,8 +2006,8 @@ where K: Ord, { - type Item = &'a (K, V); - type IntoIter = Iter<'a, (K, V)>; + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() @@ -2105,7 +2153,7 @@ for OrdMap { fn from(m: &'a HashMap) -> Self { - m.iter().cloned().collect() + m.iter().map(|(k, v)| (k.clone(), v.clone())).collect() } } @@ -2125,7 +2173,6 @@ #[cfg(test)] mod test { use super::*; - use crate::nodes::btree::DiffItem; use crate::proptest::*; use crate::test::is_sorted; use ::proptest::num::{i16, usize}; @@ -2145,15 +2192,15 @@ 6 => 66 }; let mut it = map.iter(); - assert_eq!(it.next(), Some(&(1, 11))); - assert_eq!(it.next(), Some(&(2, 22))); - assert_eq!(it.next(), Some(&(3, 33))); - assert_eq!(it.next(), Some(&(4, 44))); - assert_eq!(it.next(), Some(&(5, 55))); - assert_eq!(it.next(), Some(&(6, 66))); - assert_eq!(it.next(), Some(&(7, 77))); - assert_eq!(it.next(), Some(&(8, 88))); - assert_eq!(it.next(), Some(&(9, 99))); + assert_eq!(it.next(), Some((&1, &11))); + assert_eq!(it.next(), Some((&2, &22))); + assert_eq!(it.next(), Some((&3, &33))); + assert_eq!(it.next(), Some((&4, &44))); + assert_eq!(it.next(), Some((&5, &55))); + assert_eq!(it.next(), Some((&6, &66))); + assert_eq!(it.next(), Some((&7, &77))); + assert_eq!(it.next(), Some((&8, &88))); + assert_eq!(it.next(), Some((&9, &99))); assert_eq!(it.next(), None); } @@ -2195,14 +2242,14 @@ let (popped, less) = map.extract(&5).unwrap(); assert_eq!(popped, 55); let mut it = less.iter(); - assert_eq!(it.next(), Some(&(1, 11))); - assert_eq!(it.next(), Some(&(2, 22))); - assert_eq!(it.next(), Some(&(3, 33))); - assert_eq!(it.next(), Some(&(4, 44))); - assert_eq!(it.next(), Some(&(6, 66))); - assert_eq!(it.next(), Some(&(7, 77))); - assert_eq!(it.next(), Some(&(8, 88))); - assert_eq!(it.next(), Some(&(9, 99))); + assert_eq!(it.next(), Some((&1, &11))); + assert_eq!(it.next(), Some((&2, &22))); + assert_eq!(it.next(), Some((&3, &33))); + assert_eq!(it.next(), Some((&4, &44))); + assert_eq!(it.next(), Some((&6, &66))); + assert_eq!(it.next(), Some((&7, &77))); + assert_eq!(it.next(), Some((&8, &88))); + assert_eq!(it.next(), Some((&9, &99))); assert_eq!(it.next(), None); } @@ -2240,10 +2287,10 @@ fn double_ended_iterator_1() { let m = ordmap! {1 => 1, 2 => 2, 3 => 3, 4 => 4}; let mut it = m.iter(); - assert_eq!(Some(&(1, 1)), it.next()); - assert_eq!(Some(&(4, 4)), it.next_back()); - assert_eq!(Some(&(2, 2)), it.next()); - assert_eq!(Some(&(3, 3)), it.next_back()); + assert_eq!(Some((&1, &1)), it.next()); + assert_eq!(Some((&4, &4)), it.next_back()); + assert_eq!(Some((&2, &2)), it.next()); + assert_eq!(Some((&3, &3)), it.next_back()); assert_eq!(None, it.next()); } @@ -2251,10 +2298,10 @@ fn double_ended_iterator_2() { let m = ordmap! 
{1 => 1, 2 => 2, 3 => 3, 4 => 4}; let mut it = m.iter(); - assert_eq!(Some(&(1, 1)), it.next()); - assert_eq!(Some(&(4, 4)), it.next_back()); - assert_eq!(Some(&(2, 2)), it.next()); - assert_eq!(Some(&(3, 3)), it.next_back()); + assert_eq!(Some((&1, &1)), it.next()); + assert_eq!(Some((&4, &4)), it.next_back()); + assert_eq!(Some((&2, &2)), it.next()); + assert_eq!(Some((&3, &3)), it.next_back()); assert_eq!(None, it.next_back()); } @@ -2308,25 +2355,25 @@ #[test] fn ranged_iter() { let map: OrdMap = ordmap![1=>2, 2=>3, 3=>4, 4=>5, 5=>6]; - let range: Vec<(i32, i32)> = map.range(..).cloned().collect(); + let range: Vec<(i32, i32)> = map.range(..).map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(1, 2), (2, 3), (3, 4), (4, 5), (5, 6)], range); - let range: Vec<(i32, i32)> = map.range(..).rev().cloned().collect(); + let range: Vec<(i32, i32)> = map.range(..).rev().map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(5, 6), (4, 5), (3, 4), (2, 3), (1, 2)], range); - let range: Vec<(i32, i32)> = map.range(2..5).cloned().collect(); + let range: Vec<(i32, i32)> = map.range(2..5).map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(2, 3), (3, 4), (4, 5)], range); - let range: Vec<(i32, i32)> = map.range(2..5).rev().cloned().collect(); + let range: Vec<(i32, i32)> = map.range(2..5).rev().map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(4, 5), (3, 4), (2, 3)], range); - let range: Vec<(i32, i32)> = map.range(3..).cloned().collect(); + let range: Vec<(i32, i32)> = map.range(3..).map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(3, 4), (4, 5), (5, 6)], range); - let range: Vec<(i32, i32)> = map.range(3..).rev().cloned().collect(); + let range: Vec<(i32, i32)> = map.range(3..).rev().map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(5, 6), (4, 5), (3, 4)], range); - let range: Vec<(i32, i32)> = map.range(..4).cloned().collect(); + let range: Vec<(i32, i32)> = map.range(..4).map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(1, 2), (2, 3), (3, 4)], range); - let range: Vec<(i32, i32)> = map.range(..4).rev().cloned().collect(); + let range: Vec<(i32, i32)> = map.range(..4).rev().map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(3, 4), (2, 3), (1, 2)], range); - let range: Vec<(i32, i32)> = map.range(..=3).cloned().collect(); + let range: Vec<(i32, i32)> = map.range(..=3).map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(1, 2), (2, 3), (3, 4)], range); - let range: Vec<(i32, i32)> = map.range(..=3).rev().cloned().collect(); + let range: Vec<(i32, i32)> = map.range(..=3).rev().map(|(k, v)| (*k, *v)).collect(); assert_eq!(vec![(3, 4), (2, 3), (1, 2)], range); } @@ -2379,7 +2426,7 @@ ref ops in collection::vec((bool::ANY, usize::ANY, usize::ANY), 1..1000) ) { let mut map = input.clone(); - let mut tree: collections::BTreeMap = input.iter().cloned().collect(); + let mut tree: collections::BTreeMap = input.iter().map(|(k, v)| (*k, *v)).collect(); for (ins, key, val) in ops { if *ins { tree.insert(*key, *val); @@ -2434,7 +2481,7 @@ fn lookup(ref m in ord_map(i16::ANY, i16::ANY, 0..1000)) { let map: OrdMap = FromIterator::from_iter(m.iter().map(|(k, v)| (*k, *v))); - for (k, v) in m { + for (k, v) in m.iter() { assert_eq!(Some(*v), map.get(k).cloned()); } } @@ -2518,51 +2565,32 @@ } #[test] - fn diff_added_values(a in ord_map(i16::ANY, i16::ANY, 0..1000), b in ord_map(i16::ANY, i16::ANY, 0..1000)) { - let ab = a.clone().union(b.clone()); - assert!(a.diff(&ab).eq(b.iter().filter(|&(ref k, _)| !a.contains_key(k)).map(DiffItem::Add))); - } - - // fn diff_updated_values(a: Vec<(usize, usize)>, b: Vec<(usize, 
usize)>) -> bool { - // let a: OrdMap = OrdMap::from(a); - // let b: OrdMap = OrdMap::from(b); - // let ab: OrdMap = a.union(&b); - // let ba: OrdMap = ab.union_with(&b, |_, b| *b); - // ab.diff(&ba).eq(b.iter().filter(|&(ref k, ref v)| ab.get(k) != Some(&v)) - // .map(|(k, v)| DiffItem::Update { - // old: &(*k, *(ab.get(&k).unwrap())), - // new: &(*k, *v) - // })) - // } - - #[test] - fn diff_removed_values(a in ord_map(i16::ANY, i16::ANY, 0..1000), b in ord_map(i16::ANY, i16::ANY, 0..1000)) { - let ab = a.clone().union(b.clone()); - assert!(ab.diff(&a).eq(b.iter().filter(|&(ref k, _)| !a.contains_key(k)).map(DiffItem::Remove))); - } - - // fn diff_all_values(a: Vec<(usize, usize)>, b: Vec<(usize, usize)>) -> bool { - // let a: OrdMap = OrdMap::from(a); - // let b: OrdMap = OrdMap::from(b); - // a.diff(&b).eq(b.union(&a).iter().filter_map(|(k, v)| { - // if a.contains_key(&k) { - // if b.contains_key(&k) { - // let old = a.get(&k).unwrap(); - // if old != v { - // Some(DiffItem::Update { - // old: &(*k, *old), - // new: &(*k, *v), - // }) - // } else { - // None - // } - // } else { - // Some(DiffItem::Remove(&(*k, *v))) - // } - // } else { - // Some(DiffItem::Add(&(*k, *v))) - // } - // })) - // } + fn diff_all_values(a in collection::vec((usize::ANY, usize::ANY), 1..1000), b in collection::vec((usize::ANY, usize::ANY), 1..1000)) { + let a: OrdMap = OrdMap::from(a); + let b: OrdMap = OrdMap::from(b); + + let diff: Vec<_> = a.diff(&b).collect(); + let union = b.clone().union(a.clone()); + let expected: Vec<_> = union.iter().filter_map(|(k, v)| { + if a.contains_key(&k) { + if b.contains_key(&k) { + let old = a.get(&k).unwrap(); + if old != v { + Some(DiffItem::Update { + old: (k, old), + new: (k, v), + }) + } else { + None + } + } else { + Some(DiffItem::Remove(k, v)) + } + } else { + Some(DiffItem::Add(k, v)) + } + }).collect(); + assert_eq!(expected, diff); + } } } diff -Nru cargo-0.44.1/vendor/im-rc/src/sort.rs cargo-0.47.0/vendor/im-rc/src/sort.rs --- cargo-0.44.1/vendor/im-rc/src/sort.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/sort.rs 2020-10-01 21:38:28.000000000 +0000 @@ -5,6 +5,7 @@ use crate::vector::FocusMut; use rand_core::{RngCore, SeedableRng}; use std::cmp::Ordering; +use std::mem; fn gen_range(rng: &mut R, min: usize, max: usize) -> usize { let range = max - min; @@ -13,85 +14,171 @@ // Ported from the Java version at: // http://www.cs.princeton.edu/~rs/talks/QuicksortIsOptimal.pdf -// Should be O(n) to O(n log n) -fn do_quicksort( - vector: &mut FocusMut<'_, A>, - left: usize, - right: usize, - cmp: &F, - rng: &mut R, -) where +// There are a couple of modifications made here to make it more performant on the tree structure of +// the Vector. Instead of moving of handling equal and nonequal items in a single pass we make two +// additional passes to find the exact partition places. This allows us to split the focus into +// three correctly sized parts for less than, equal to and greater than items. As a bonus this +// doesn't need to reorder the equal items to the center of the vector. 
+fn do_quicksort(vector: FocusMut<'_, A>, cmp: &F, rng: &mut R) +where A: Clone, F: Fn(&A, &A) -> Ordering, R: RngCore, { - if right <= left { + if vector.len() <= 1 { return; } - let l = left as isize; - let r = right as isize; - let p = gen_range(rng, left, right + 1) as isize; - let mut l1 = l; - let mut r1 = r; - let mut l2 = l - 1; - let mut r2 = r; - - vector.swap(r as usize, p as usize); - loop { - while l1 != r && vector.pair(l1 as usize, r as usize, |a, b| cmp(a, b)) == Ordering::Less { - l1 += 1; - } - r1 -= 1; - while r1 != r && vector.pair(r as usize, r1 as usize, |a, b| cmp(a, b)) == Ordering::Less { - if r1 == l { - break; - } - r1 -= 1; + // We know there are at least 2 elements here + let pivot_index = gen_range(rng, 0, vector.len()); + let (mut first, mut rest) = vector.split_at(1); + + if pivot_index > 0 { + mem::swap(rest.index_mut(pivot_index - 1), first.index_mut(0)); + } + // Pivot is now always in the first slice + let pivot_item = first.index(0); + + // Find the exact place to put the pivot or pivot-equal items + let mut less_count = 0; + let mut equal_count = 0; + + for index in 0..rest.len() { + let item = rest.index(index); + let comp = cmp(item, pivot_item); + match comp { + Ordering::Less => less_count += 1, + Ordering::Equal => equal_count += 1, + Ordering::Greater => {} } - if l1 >= r1 { - break; + } + + // If by accident we picked the minimum element as a pivot, we just call sort again with the + // rest of the vector. + if less_count == 0 { + do_quicksort(rest, cmp, rng); + return; + } + + // We know here that there is at least one item before the pivot, so we move the minimum to the + // beginning part of the vector. First, however we swap the pivot to the start of the equal + // zone. + less_count -= 1; + equal_count += 1; + let first_item = first.index_mut(0); + mem::swap(first_item, rest.index_mut(less_count)); + for index in 0..rest.len() { + if index == less_count { + // This is the position we swapped the pivot to. We can't move it from its position, and + // we know its not the minimum. + continue; + } + let rest_item = rest.index_mut(index); + if cmp(rest_item, first_item) == Ordering::Less { + mem::swap(first_item, rest_item); } - vector.swap(l1 as usize, r1 as usize); - if l1 != r && vector.pair(l1 as usize, r as usize, |a, b| cmp(a, b)) == Ordering::Equal { - l2 += 1; - vector.swap(l2 as usize, l1 as usize); + } + + // Split the vector up into less_than, equal to and greater than parts. 
+ let (remaining, mut greater_focus) = rest.split_at(less_count + equal_count); + let (mut less_focus, mut equal_focus) = remaining.split_at(less_count); + + let mut less_position = 0; + let mut equal_position = 0; + let mut greater_position = 0; + + while less_position != less_focus.len() || greater_position != greater_focus.len() { + // At start of this loop, equal_position always points to an equal item + let mut equal_swap_side = None; + let equal_item = equal_focus.index(equal_position); + + // Advance the less_position until we find an out of place item + while less_position != less_focus.len() { + let less_item = less_focus.index(less_position); + match cmp(less_item, equal_item) { + Ordering::Equal => { + equal_swap_side = Some(Ordering::Less); + break; + } + Ordering::Greater => { + break; + } + _ => {} + } + less_position += 1; } - if r1 != r && vector.pair(r as usize, r1 as usize, |a, b| cmp(a, b)) == Ordering::Equal { - r2 -= 1; - vector.swap(r1 as usize, r2 as usize); + + // Advance the greater until we find an out of place item + while greater_position != greater_focus.len() { + let greater_item = greater_focus.index(greater_position); + match cmp(greater_item, equal_item) { + Ordering::Less => break, + Ordering::Equal => { + equal_swap_side = Some(Ordering::Greater); + break; + } + _ => {} + } + greater_position += 1; } - } - vector.swap(l1 as usize, r as usize); - r1 = l1 - 1; - l1 += 1; - let mut k = l; - while k < l2 { - vector.swap(k as usize, r1 as usize); - r1 -= 1; - k += 1; - } - k = r - 1; - while k > r2 { - vector.swap(l1 as usize, k as usize); - k -= 1; - l1 += 1; + if let Some(swap_side) = equal_swap_side { + // One of the sides is equal to the pivot, advance the pivot + let item = if swap_side == Ordering::Less { + less_focus.index_mut(less_position) + } else { + greater_focus.index_mut(greater_position) + }; + + // We are guaranteed not to hit the end of the equal focus + while cmp(item, equal_focus.index(equal_position)) == Ordering::Equal { + equal_position += 1; + } + + // Swap the equal position and the desired side, it's important to note that only the + // equals focus is guaranteed to have made progress so we don't advance the side's index + mem::swap(item, equal_focus.index_mut(equal_position)); + } else if less_position != less_focus.len() && greater_position != greater_focus.len() { + // Both sides are out of place and not equal to the pivot, this can only happen if there + // is a greater item in the lesser zone and a lesser item in the greater zone. The + // solution is to swap both sides and advance both side's indices. 
+ debug_assert_ne!( + cmp( + less_focus.index(less_position), + equal_focus.index(equal_position) + ), + Ordering::Equal + ); + debug_assert_ne!( + cmp( + greater_focus.index(greater_position), + equal_focus.index(equal_position) + ), + Ordering::Equal + ); + mem::swap( + less_focus.index_mut(less_position), + greater_focus.index_mut(greater_position), + ); + less_position += 1; + greater_position += 1; + } } - if r1 >= 0 { - do_quicksort(vector, left, r1 as usize, cmp, rng); + // Now we have partitioned both sides correctly, we just have to recurse now + do_quicksort(less_focus, cmp, rng); + if !greater_focus.is_empty() { + do_quicksort(greater_focus, cmp, rng); } - do_quicksort(vector, l1 as usize, right, cmp, rng); } -pub(crate) fn quicksort(vector: &mut FocusMut<'_, A>, left: usize, right: usize, cmp: &F) +pub(crate) fn quicksort(vector: FocusMut<'_, A>, cmp: &F) where A: Clone, F: Fn(&A, &A) -> Ordering, { let mut rng = rand_xoshiro::Xoshiro256Plus::seed_from_u64(0); - do_quicksort(vector, left, right, cmp, &mut rng); + do_quicksort(vector, cmp, &mut rng); } #[cfg(test)] @@ -104,11 +191,11 @@ proptest! { #[test] - fn test_quicksort(ref input in vector(i32::ANY, 0..1000)) { + fn test_quicksort(ref input in vector(i32::ANY, 0..10000)) { let mut vec = input.clone(); let len = vec.len(); if len > 1 { - quicksort(&mut vec.focus_mut(), 0, len - 1, &Ord::cmp); + quicksort(vec.focus_mut(), &Ord::cmp); } assert!(is_sorted(vec)); } diff -Nru cargo-0.44.1/vendor/im-rc/src/tests/vector.rs cargo-0.47.0/vendor/im-rc/src/tests/vector.rs --- cargo-0.44.1/vendor/im-rc/src/tests/vector.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/tests/vector.rs 2020-10-01 21:38:28.000000000 +0000 @@ -93,7 +93,7 @@ } Action::SplitLeft(ref index) => { let index = cap_index(expected.len(), *index); - expected.split_off(index); + expected.truncate(index); writeln!(out, "vec.split_off({:?});", index)? } Action::SplitRight(ref index) => { @@ -217,3 +217,16 @@ } } } + +#[test] +fn test_inserts() { + const N: usize = 2000; + let mut v = Vector::new(); + for i in 0..N { + v.insert(v.len() / 2, i); + } + let mut rv: Vec = Vec::new(); + rv.extend((0..N).skip(1).step_by(2)); + rv.extend((0..N).step_by(2).rev()); + assert_eq!(Vector::from_iter(rv.iter().cloned()), v); +} diff -Nru cargo-0.44.1/vendor/im-rc/src/util.rs cargo-0.47.0/vendor/im-rc/src/util.rs --- cargo-0.44.1/vendor/im-rc/src/util.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/util.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,15 +14,9 @@ // The `Ref` type is an alias for either `Rc` or `Arc`, user's choice. 
// `Arc` without refpool -#[cfg(all(threadsafe, not(feature = "pool")))] +#[cfg(all(threadsafe))] pub(crate) use crate::fakepool::{Arc as PoolRef, Pool, PoolClone, PoolDefault}; -// `Arc` with refpool -#[cfg(all(threadsafe, feature = "pool"))] -pub(crate) type PoolRef = refpool::PoolRef; -#[cfg(all(threadsafe, feature = "pool"))] -pub(crate) type Pool = refpool::Pool; - // `Ref` == `Arc` when threadsafe #[cfg(threadsafe)] pub(crate) type Ref = std::sync::Arc; @@ -33,9 +27,9 @@ // `Rc` with refpool #[cfg(all(not(threadsafe), feature = "pool"))] -pub(crate) type PoolRef = refpool::PoolRef; +pub(crate) type PoolRef = refpool::PoolRef; #[cfg(all(not(threadsafe), feature = "pool"))] -pub(crate) type Pool = refpool::Pool; +pub(crate) type Pool = refpool::Pool; // `Ref` == `Rc` when not threadsafe #[cfg(not(threadsafe))] diff -Nru cargo-0.44.1/vendor/im-rc/src/vector/mod.rs cargo-0.47.0/vendor/im-rc/src/vector/mod.rs --- cargo-0.44.1/vendor/im-rc/src/vector/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/im-rc/src/vector/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -316,6 +316,31 @@ self.len() == 0 } + /// Test whether a vector is currently inlined. + /// + /// Vectors small enough that their contents could be stored entirely inside + /// the space of `std::mem::size_of::>()` bytes are stored inline on + /// the stack instead of allocating any chunks. This method returns `true` if + /// this vector is currently inlined, or `false` if it currently has chunks allocated + /// on the heap. + /// + /// This may be useful in conjunction with [`ptr_eq()`][ptr_eq], which checks if + /// two vectors' heap allocations are the same, and thus will never return `true` + /// for inlined vectors. + /// + /// Time: O(1) + /// + /// [ptr_eq]: #method.ptr_eq + #[inline] + #[must_use] + pub fn is_inline(&self) -> bool { + if let Inline(_, _) = &self.vector { + true + } else { + false + } + } + /// Test whether two vectors refer to the same content in memory. 
/// /// This uses the following rules to determine equality: @@ -347,8 +372,8 @@ && cmp_chunk(&left.inner_f, &right.inner_f) && cmp_chunk(&left.inner_b, &right.inner_b) && cmp_chunk(&left.outer_b, &right.outer_b) - && (left.middle.is_empty() && right.middle.is_empty()) - || Ref::ptr_eq(&left.middle, &right.middle) + && ((left.middle.is_empty() && right.middle.is_empty()) + || Ref::ptr_eq(&left.middle, &right.middle)) } _ => false, } @@ -1475,7 +1500,7 @@ { let len = self.len(); if len > 1 { - sort::quicksort(&mut self.focus_mut(), 0, len - 1, &cmp); + sort::quicksort(self.focus_mut(), &cmp); } } @@ -1732,8 +1757,8 @@ && cmp_chunk(&left.inner_f, &right.inner_f) && cmp_chunk(&left.inner_b, &right.inner_b) && cmp_chunk(&left.outer_b, &right.outer_b) - && (left.middle.is_empty() && right.middle.is_empty()) - || Ref::ptr_eq(&left.middle, &right.middle) + && ((left.middle.is_empty() && right.middle.is_empty()) + || Ref::ptr_eq(&left.middle, &right.middle)) { return true; } @@ -2481,6 +2506,33 @@ assert_eq!(vec.len(), rev_vec.len()); } + #[test] + fn issue_131() { + let smol = std::iter::repeat(42).take(64).collect::>(); + let mut smol2 = smol.clone(); + assert!(smol.ptr_eq(&smol2)); + smol2.set(63, 420); + assert!(!smol.ptr_eq(&smol2)); + + let huge = std::iter::repeat(42).take(65).collect::>(); + let mut huge2 = huge.clone(); + assert!(huge.ptr_eq(&huge2)); + huge2.set(63, 420); + assert!(!huge.ptr_eq(&huge2)); + } + + #[test] + fn ptr_eq() { + for len in 32..256 { + let input = std::iter::repeat(42).take(len).collect::>(); + let mut inp2 = input.clone(); + assert!(input.ptr_eq(&inp2)); + inp2.set(len - 1, 98); + assert_ne!(inp2.get(len - 1), input.get(len - 1)); + assert!(!input.ptr_eq(&inp2), len); + } + } + proptest! { #[test] fn iter(ref vec in vec(i32::ANY, 0..1000)) { diff -Nru cargo-0.44.1/vendor/itoa/.cargo-checksum.json cargo-0.47.0/vendor/itoa/.cargo-checksum.json --- cargo-0.44.1/vendor/itoa/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/itoa/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"} \ No newline at end of file +{"files":{},"package":"dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/itoa/Cargo.toml cargo-0.47.0/vendor/itoa/Cargo.toml --- cargo-0.44.1/vendor/itoa/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/itoa/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "itoa" -version = "0.4.5" +version = "0.4.6" authors = ["David Tolnay "] exclude = ["performance.png"] description = "Fast functions for printing integer primitives to an io::Write" @@ -21,10 +21,10 @@ categories = ["value-formatting"] license = "MIT OR Apache-2.0" repository = "https://github.com/dtolnay/itoa" +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] [features] default = ["std"] i128 = [] std = [] -[badges.travis-ci] -repository = "dtolnay/itoa" diff -Nru cargo-0.44.1/vendor/itoa/README.md cargo-0.47.0/vendor/itoa/README.md --- cargo-0.44.1/vendor/itoa/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/itoa/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,9 +1,10 @@ itoa ==== -[![Build Status](https://api.travis-ci.org/dtolnay/itoa.svg?branch=master)](https://travis-ci.org/dtolnay/itoa) -[![Latest Version](https://img.shields.io/crates/v/itoa.svg)](https://crates.io/crates/itoa) -[![Rust 
Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/itoa) +[github](https://github.com/dtolnay/itoa) +[crates.io](https://crates.io/crates/itoa) +[docs.rs](https://docs.rs/itoa) +[build status](https://github.com/dtolnay/itoa/actions?query=branch%3Amaster) This crate provides fast functions for printing integer primitives to an [`io::Write`] or a [`fmt::Write`]. The implementation comes straight from diff -Nru cargo-0.44.1/vendor/itoa/src/lib.rs cargo-0.47.0/vendor/itoa/src/lib.rs --- cargo-0.44.1/vendor/itoa/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/itoa/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,11 @@ +//! [![github]](https://github.com/dtolnay/itoa) [![crates-io]](https://crates.io/crates/itoa) [![docs-rs]](https://docs.rs/itoa) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K +//! +//!
+//! //! This crate provides fast functions for printing integer primitives to an //! [`io::Write`] or a [`fmt::Write`]. The implementation comes straight from //! [libcore] but avoids the performance penalty of going through @@ -48,7 +56,7 @@ //! } //! ``` -#![doc(html_root_url = "https://docs.rs/itoa/0.4.5")] +#![doc(html_root_url = "https://docs.rs/itoa/0.4.6")] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))] #![cfg_attr( diff -Nru cargo-0.44.1/vendor/lazycell/.cargo-checksum.json cargo-0.47.0/vendor/lazycell/.cargo-checksum.json --- cargo-0.44.1/vendor/lazycell/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f"} \ No newline at end of file +{"files":{},"package":"830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/lazycell/Cargo.toml cargo-0.47.0/vendor/lazycell/Cargo.toml --- cargo-0.44.1/vendor/lazycell/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "lazycell" -version = "1.2.1" +version = "1.3.0" authors = ["Alex Crichton ", "Nikita Pekin "] include = ["CHANGELOG.md", "Cargo.toml", "LICENSE-MIT", "LICENSE-APACHE", "README.md", "src/**/*.rs"] description = "A library providing a lazily filled Cell struct" @@ -21,3 +21,14 @@ keywords = ["lazycell", "lazy", "cell", "library"] license = "MIT/Apache-2.0" repository = "https://github.com/indiv0/lazycell" +[dependencies.clippy] +version = "0.0" +optional = true + +[dependencies.serde] +version = "^1" +optional = true + +[features] +nightly = [] +nightly-testing = ["clippy", "nightly"] diff -Nru cargo-0.44.1/vendor/lazycell/CHANGELOG.md cargo-0.47.0/vendor/lazycell/CHANGELOG.md --- cargo-0.44.1/vendor/lazycell/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,22 @@ +
+## v1.3.0 (2020-08-12) + + +#### Bug Fixes + +* Add custom `impl Default` to support non-Default-able `` types ([b49f4eab](https://github.com/indiv0/lazycell/commit/b49f4eabec49c0a5146ef01017c2506a3c357180)) +* **lazycell:** Fix unsound aliasing in `LazyCell::fill` ([e789ac1a](https://github.com/indiv0/lazycell/commit/e789ac1a99010ad79c2d09c761fec6d67053647d), closes [#98](https://github.com/indiv0/lazycell/issues/98)) + +#### Features + +* Implement serde support ([e728a0b6](https://github.com/indiv0/lazycell/commit/e728a0b680e607b793a81b5af7bf7f1d2c0eb5e5)) + +#### Documentation + +* fix typo ([5f5ba9d5](https://github.com/indiv0/lazycell/commit/5f5ba9d5ac3364f8376c0c872c2e5094974385ba)) + + + ## v1.2.1 (2018-12-03) diff -Nru cargo-0.44.1/vendor/lazycell/debian/patches/no-clippy.patch cargo-0.47.0/vendor/lazycell/debian/patches/no-clippy.patch --- cargo-0.44.1/vendor/lazycell/debian/patches/no-clippy.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/debian/patches/no-clippy.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ ---- a/Cargo.toml -+++ b/Cargo.toml -@@ -23,8 +23 @@ license = "MIT/Apache-2.0" - repository = "https://github.com/indiv0/lazycell" --[dependencies.clippy] --version = "0.0" --optional = true -- --[features] --nightly = [] --nightly-testing = ["clippy", "nightly"] diff -Nru cargo-0.44.1/vendor/lazycell/debian/patches/series cargo-0.47.0/vendor/lazycell/debian/patches/series --- cargo-0.44.1/vendor/lazycell/debian/patches/series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -no-clippy.patch diff -Nru cargo-0.44.1/vendor/lazycell/.pc/applied-patches cargo-0.47.0/vendor/lazycell/.pc/applied-patches --- cargo-0.44.1/vendor/lazycell/.pc/applied-patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/.pc/applied-patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -no-clippy.patch diff -Nru cargo-0.44.1/vendor/lazycell/.pc/no-clippy.patch/Cargo.toml cargo-0.47.0/vendor/lazycell/.pc/no-clippy.patch/Cargo.toml --- cargo-0.44.1/vendor/lazycell/.pc/no-clippy.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/.pc/no-clippy.patch/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "lazycell" -version = "1.2.1" -authors = ["Alex Crichton ", "Nikita Pekin "] -include = ["CHANGELOG.md", "Cargo.toml", "LICENSE-MIT", "LICENSE-APACHE", "README.md", "src/**/*.rs"] -description = "A library providing a lazily filled Cell struct" -documentation = "http://indiv0.github.io/lazycell/lazycell/" -readme = "README.md" -keywords = ["lazycell", "lazy", "cell", "library"] -license = "MIT/Apache-2.0" -repository = "https://github.com/indiv0/lazycell" -[dependencies.clippy] -version = "0.0" -optional = true - -[features] -nightly = [] -nightly-testing = ["clippy", "nightly"] diff -Nru cargo-0.44.1/vendor/lazycell/.pc/.quilt_patches cargo-0.47.0/vendor/lazycell/.pc/.quilt_patches --- cargo-0.44.1/vendor/lazycell/.pc/.quilt_patches 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/.pc/.quilt_patches 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -debian/patches diff -Nru cargo-0.44.1/vendor/lazycell/.pc/.quilt_series cargo-0.47.0/vendor/lazycell/.pc/.quilt_series --- cargo-0.44.1/vendor/lazycell/.pc/.quilt_series 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/.pc/.quilt_series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -series diff -Nru cargo-0.44.1/vendor/lazycell/.pc/.version cargo-0.47.0/vendor/lazycell/.pc/.version --- cargo-0.44.1/vendor/lazycell/.pc/.version 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/.pc/.version 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2 diff -Nru cargo-0.44.1/vendor/lazycell/README.md cargo-0.47.0/vendor/lazycell/README.md --- cargo-0.44.1/vendor/lazycell/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -7,6 +7,7 @@ + cargo-downloads-badge api-docs-badge crates-io license-badge @@ -30,7 +31,7 @@ ```toml [dependencies] -lazycell = "1.2" +lazycell = "1.3" ``` And in your `lib.rs` or `main.rs`: diff -Nru cargo-0.44.1/vendor/lazycell/src/lib.rs cargo-0.47.0/vendor/lazycell/src/lib.rs --- cargo-0.44.1/vendor/lazycell/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,5 +1,5 @@ // Original work Copyright (c) 2014 The Rust Project Developers -// Modified work Copyright (c) 2016-2018 Nikita Pekin and the lazycell contributors +// Modified work Copyright (c) 2016-2020 Nikita Pekin and the lazycell contributors // See the README.md file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 { inner: UnsafeCell>, } @@ -71,12 +76,13 @@ /// Put a value into this cell. /// - /// This function will return `Err(value)` is the cell is already full. + /// This function will return `Err(value)` if the cell is already full. pub fn fill(&self, value: T) -> Result<(), T> { - let slot = unsafe { &mut *self.inner.get() }; + let slot = unsafe { &*self.inner.get() }; if slot.is_some() { return Err(value); } + let slot = unsafe { &mut *self.inner.get() }; *slot = Some(value); Ok(()) @@ -215,6 +221,12 @@ } } +impl Default for LazyCell { + fn default() -> Self { + Self::new() + } +} + impl Clone for LazyCell { /// Create a clone of this `LazyCell` /// @@ -232,7 +244,7 @@ const SOME: usize = 2; /// A lazily filled and thread-safe `Cell`, with frozen contents. 
-#[derive(Debug, Default)] +#[derive(Debug)] pub struct AtomicLazyCell { inner: UnsafeCell>, state: AtomicUsize, @@ -252,7 +264,7 @@ /// Put a value into this cell. /// - /// This function will return `Err(value)` is the cell is already full. + /// This function will return `Err(value)` if the cell is already full. pub fn fill(&self, t: T) -> Result<(), T> { if NONE != self.state.compare_and_swap(NONE, LOCK, Ordering::Acquire) { return Err(t); @@ -325,6 +337,12 @@ } } +impl Default for AtomicLazyCell { + fn default() -> Self { + Self::new() + } +} + impl Clone for AtomicLazyCell { /// Create a clone of this `AtomicLazyCell` /// @@ -646,4 +664,17 @@ assert_eq!(clone2.borrow(), Some(&4)); assert_eq!(cell.borrow(), Some(&2)); } + + #[test] + fn default() { + #[derive(Default)] + struct Defaultable; + struct NonDefaultable; + + let _: LazyCell = LazyCell::default(); + let _: LazyCell = LazyCell::default(); + + let _: AtomicLazyCell = AtomicLazyCell::default(); + let _: AtomicLazyCell = AtomicLazyCell::default(); + } } diff -Nru cargo-0.44.1/vendor/lazycell/src/serde_impl.rs cargo-0.47.0/vendor/lazycell/src/serde_impl.rs --- cargo-0.44.1/vendor/lazycell/src/serde_impl.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/lazycell/src/serde_impl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,86 @@ +// Copyright (c) 2020 Nikita Pekin and the lazycell contributors +// See the README.md file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.use serde::ser::{Serialize, Serializer}; +use serde::ser::{Serialize, Serializer}; +use serde::de::{self, Deserialize, Deserializer, Visitor}; + +use std::fmt; +use std::marker::PhantomData; + +use super::{LazyCell, AtomicLazyCell}; + +impl Serialize for LazyCell { + fn serialize(&self, serializer: S) -> Result { + match self.borrow() { + Some(val) => serializer.serialize_some(val), + None => serializer.serialize_none() + } + } +} + + +impl Serialize for AtomicLazyCell { + fn serialize(&self, serializer: S) -> Result { + match self.borrow() { + Some(val) => serializer.serialize_some(val), + None => serializer.serialize_none() + } + } +} + +struct LazyCellVisitor(PhantomData<*const T>); +impl<'de, T: Deserialize<'de>> Visitor<'de> for LazyCellVisitor { + type Value = LazyCell; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a LazyCell") + } + + fn visit_some>(self, deserializer: D) -> Result { + let mut cell = LazyCell::new(); + cell.replace(T::deserialize(deserializer)?); + Ok(cell) + } + + fn visit_none(self) -> Result { + Ok(LazyCell::new()) + } +} + +impl<'de, T: Deserialize<'de>> Deserialize<'de> for LazyCell { + fn deserialize>(deserializer: D) -> Result { + deserializer.deserialize_option(LazyCellVisitor(PhantomData)) + } +} + + +struct AtomicLazyCellVisitor(PhantomData<*const T>); +impl<'de, T: Deserialize<'de>> Visitor<'de> for AtomicLazyCellVisitor { + type Value = AtomicLazyCell; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an AtomicLazyCell") + } + + fn visit_some>(self, deserializer: D) -> Result { + let mut cell = AtomicLazyCell::new(); + cell.replace(T::deserialize(deserializer)?); + Ok(cell) + } + + fn visit_none(self) -> Result { + Ok(AtomicLazyCell::new()) + } +} + + +impl<'de, T: Deserialize<'de>> Deserialize<'de> for AtomicLazyCell { + fn 
deserialize>(deserializer: D) -> Result { + deserializer.deserialize_option(AtomicLazyCellVisitor(PhantomData)) + } +} diff -Nru cargo-0.44.1/vendor/libc/build.rs cargo-0.47.0/vendor/libc/build.rs --- cargo-0.44.1/vendor/libc/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -65,9 +65,10 @@ println!("cargo:rustc-cfg=libc_core_cvoid"); } - // Rust >= 1.33 supports repr(packed(N)) + // Rust >= 1.33 supports repr(packed(N)) and cfg(target_vendor). if rustc_minor_ver >= 33 || rustc_dep_of_std { println!("cargo:rustc-cfg=libc_packedN"); + println!("cargo:rustc-cfg=libc_cfg_target_vendor"); } // #[thread_local] is currently unstable diff -Nru cargo-0.44.1/vendor/libc/.cargo-checksum.json cargo-0.47.0/vendor/libc/.cargo-checksum.json --- cargo-0.44.1/vendor/libc/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49"} \ No newline at end of file +{"files":{},"package":"aa7087f49d294270db4e1928fc110c976cd4b9e5a16348e0a1df09afa99e6c98"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/libc/Cargo.toml cargo-0.47.0/vendor/libc/Cargo.toml --- cargo-0.44.1/vendor/libc/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "libc" -version = "0.2.71" +version = "0.2.78" authors = ["The Rust Project Developers"] build = "build.rs" exclude = ["/ci/*", "/azure-pipelines.yml"] diff -Nru cargo-0.44.1/vendor/libc/CONTRIBUTING.md cargo-0.47.0/vendor/libc/CONTRIBUTING.md --- cargo-0.44.1/vendor/libc/CONTRIBUTING.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/CONTRIBUTING.md 2020-10-01 21:38:28.000000000 +0000 @@ -53,14 +53,11 @@ crates.io! The only next step is to bump the version of libc and then publish it. If you'd like to get a release out ASAP you can follow these steps: -1. Update the version number in `Cargo.toml`, you'll just be bumping the patch - version number. -2. Run `cargo update` to regenerate the lockfile to encode your version bump in - the lock file. You may pull in some other updated dependencies, that's ok. -3. Send a PR to this repository. It should [look like this][example], but it'd +1. Increment the patch version number in `Cargo.toml`. +1. Send a PR to this repository. It should [look like this][example], but it'd also be nice to fill out the description with a small rationale for the release (any rationale is ok though!) -4. Once merged the release will be tagged and published by one of the libc crate +1. Once merged, the release will be tagged and published by one of the libc crate maintainers. 
[example]: https://github.com/rust-lang/libc/pull/583 diff -Nru cargo-0.44.1/vendor/libc/README.md cargo-0.47.0/vendor/libc/README.md --- cargo-0.44.1/vendor/libc/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,6 @@ -[![Azure Status]][Azure] [![Cirrus CI Status]][Cirrus CI] [![Latest Version]][crates.io] [![Documentation]][docs.rs] ![License] +# libc - Raw FFI bindings to platforms' system libraries -libc - Raw FFI bindings to platforms' system libraries -==== +[![Azure Status]][Azure] [![Cirrus CI Status]][Cirrus CI] [![Latest Version]][crates.io] [![Documentation]][docs.rs] ![License] `libc` provides all of the definitions necessary to easily interoperate with C code (or "C-like" code) on each of the platforms that Rust supports. This @@ -29,14 +28,14 @@ ## Features * `std`: by default `libc` links to the standard library. Disable this - feature remove this dependency and be able to use `libc` in `#![no_std]` + feature to remove this dependency and be able to use `libc` in `#![no_std]` crates. * `extra_traits`: all `struct`s implemented in `libc` are `Copy` and `Clone`. This feature derives `Debug`, `Eq`, `Hash`, and `PartialEq`. * `const-extern-fn`: Changes some `extern fn`s into `const extern fn`s. - This features requires a nightly rustc + This feature requires a nightly rustc. * **deprecated**: `use_std` is deprecated, and is equivalent to `std`. @@ -53,6 +52,7 @@ | `extra_traits` | 1.25.0 | | `core::ffi::c_void` | 1.30.0 | | `repr(packed(N))` | 1.33.0 | +| `cfg(target_vendor)` | 1.33.0 | ## Platform support @@ -71,10 +71,10 @@ This project is licensed under either of * [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) - ([LICENSE-APACHE](LICENSE-APACHE)) + ([LICENSE-APACHE](https://github.com/rust-lang/libc/blob/master/LICENSE-APACHE)) * [MIT License](https://opensource.org/licenses/MIT) - ([LICENSE-MIT](LICENSE-MIT)) + ([LICENSE-MIT](https://github.com/rust-lang/libc/blob/master/LICENSE-MIT)) at your option. @@ -83,7 +83,7 @@ We welcome all people who want to contribute. Please see the [contributing instructions] for more information. -[contributing instructions]: CONTRIBUTING.md +[contributing instructions]: https://github.com/rust-lang/libc/blob/master/CONTRIBUTING.md Contributions in any form (issues, pull requests, etc.) to this project must adhere to Rust's [Code of Conduct]. 
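// Editor's sketch (not part of the libc diff): what the `extra_traits` feature
// described in the README above buys in practice. Assuming a Cargo.toml entry
// such as
//     libc = { version = "0.2", features = ["extra_traits"] }
// libc's structs also derive Debug/Eq/Hash/PartialEq, so the `{:?}` formatting
// below compiles; with only the default features the structs are merely
// Copy + Clone and this would not build. The field layout shown is the common
// Linux one.
fn main() {
    let ts = libc::timespec { tv_sec: 1, tv_nsec: 500_000_000 };
    println!("{:?}", ts); // e.g. timespec { tv_sec: 1, tv_nsec: 500000000 }
}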
diff -Nru cargo-0.44.1/vendor/libc/src/fuchsia/mod.rs cargo-0.47.0/vendor/libc/src/fuchsia/mod.rs --- cargo-0.44.1/vendor/libc/src/fuchsia/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/fuchsia/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3158,42 +3158,6 @@ } } - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WIFCONTINUED(status: ::c_int) -> bool { - status == 0xffff - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - status & 0x7f - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0x7f) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WCOREDUMP(status: ::c_int) -> bool { - (status & 0x80) != 0 - } - - pub fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { - (cmd << 8) | (type_ & 0x00ff) - } - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { for slot in cpuset.bits.iter_mut() { *slot = 0; @@ -3250,6 +3214,98 @@ dev |= (minor & 0xffffff00) << 12; dev } + + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { + cmsg.offset(1) as *mut c_uchar + } + + pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) + -> *mut cmsghdr + { + if ((*cmsg).cmsg_len as ::size_t) < ::mem::size_of::() { + 0 as *mut cmsghdr + } else if __CMSG_NEXT(cmsg).add(::mem::size_of::()) + >= __MHDR_END(mhdr) { + 0 as *mut cmsghdr + } else { + __CMSG_NEXT(cmsg).cast() + } + } + + pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr { + if (*mhdr).msg_controllen as ::size_t >= ::mem::size_of::() { + (*mhdr).msg_control.cast() + } else { + 0 as *mut cmsghdr + } + } + + pub fn CMSG_ALIGN(len: ::size_t) -> ::size_t { + (len + ::mem::size_of::<::size_t>() - 1) + & !(::mem::size_of::<::size_t>() - 1) + } + + pub fn CMSG_SPACE(len: ::c_uint) -> ::c_uint { + (CMSG_ALIGN(len as ::size_t) + CMSG_ALIGN(::mem::size_of::())) + as ::c_uint + } + + pub fn CMSG_LEN(len: ::c_uint) -> ::c_uint { + (CMSG_ALIGN(::mem::size_of::()) + len as ::size_t) as ::c_uint + } +} + +safe_f! 
{ + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { + status == 0xffff + } + + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { + status & 0x7f + } + + pub {const} fn WIFEXITED(status: ::c_int) -> bool { + (status & 0x7f) == 0 + } + + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { + (status & 0x80) != 0 + } + + pub {const} fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { + (cmd << 8) | (type_ & 0x00ff) + } +} + +fn __CMSG_LEN(cmsg: *const cmsghdr) -> ::ssize_t { + ((unsafe { (*cmsg).cmsg_len as ::size_t } + ::mem::size_of::<::c_long>() + - 1) + & !(::mem::size_of::<::c_long>() - 1)) as ::ssize_t +} + +fn __CMSG_NEXT(cmsg: *const cmsghdr) -> *mut c_uchar { + (unsafe { cmsg.offset(__CMSG_LEN(cmsg)) }) as *mut c_uchar +} + +fn __MHDR_END(mhdr: *const msghdr) -> *mut c_uchar { + unsafe { (*mhdr).msg_control.offset((*mhdr).msg_controllen as isize) } + .cast() } // EXTERN_FN diff -Nru cargo-0.44.1/vendor/libc/src/lib.rs cargo-0.47.0/vendor/libc/src/lib.rs --- cargo-0.44.1/vendor/libc/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -5,13 +5,13 @@ //! [pd]: https://rust-lang.github.io/libc/#platform-specific-documentation #![crate_name = "libc"] #![crate_type = "rlib"] -// FIXME: Remove this and redundant_semicolon once renamed lint reaches stable. -#![allow(renamed_and_removed_lints)] #![allow( + renamed_and_removed_lints, // Keep this order. + unknown_lints, // Keep this order. bad_style, overflowing_literals, improper_ctypes, - unknown_lints, + // This lint is renamed but we run CI for old stable rustc so should be here. redundant_semicolon, redundant_semicolons )] @@ -19,7 +19,7 @@ // Attributes needed when building as part of the standard library #![cfg_attr( feature = "rustc-dep-of-std", - feature(cfg_target_vendor, link_cfg, no_core) + feature(cfg_target_vendor, link_cfg, no_core, static_nobundle) )] #![cfg_attr(libc_thread_local, feature(thread_local))] // Enable extra lints: diff -Nru cargo-0.44.1/vendor/libc/src/macros.rs cargo-0.47.0/vendor/libc/src/macros.rs --- cargo-0.44.1/vendor/libc/src/macros.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/macros.rs 2020-10-01 21:38:28.000000000 +0000 @@ -121,6 +121,36 @@ ); } +#[allow(unused_macros)] +macro_rules! e { + ($($(#[$attr:meta])* pub enum $i:ident { $($field:tt)* })*) => ($( + __item! { + #[cfg_attr(feature = "extra_traits", derive(Debug, Eq, Hash, PartialEq))] + $(#[$attr])* + pub enum $i { $($field)* } + } + impl ::Copy for $i {} + impl ::Clone for $i { + fn clone(&self) -> $i { *self } + } + )*); +} + +#[allow(unused_macros)] +macro_rules! s_paren { + ($($(#[$attr:meta])* pub struct $i:ident ( $($field:tt)* ); )* ) => ($( + __item! { + #[cfg_attr(feature = "extra_traits", derive(Debug, Eq, Hash, PartialEq))] + $(#[$attr])* + pub struct $i ( $($field)* ); + } + impl ::Copy for $i {} + impl ::Clone for $i { + fn clone(&self) -> $i { *self } + } + )*); +} + // This is a pretty horrible hack to allow us to conditionally mark // some functions as 'const', without requiring users of this macro // to care about the "const-extern-fn" feature. 
@@ -166,6 +196,21 @@ } #[allow(unused_macros)] + macro_rules! safe_f { + ($(pub $({$constness:ident})* fn $i:ident( + $($arg:ident: $argty:ty),* + ) -> $ret:ty { + $($body:stmt);* + })*) => ($( + #[inline] + pub $($constness)* extern fn $i($($arg: $argty),* + ) -> $ret { + $($body);* + } + )*) + } + + #[allow(unused_macros)] macro_rules! const_fn { ($($({$constness:ident})* fn $i:ident( $($arg:ident: $argty:ty),* @@ -193,6 +238,21 @@ ) -> $ret { $($body);* } + )*) + } + + #[allow(unused_macros)] + macro_rules! safe_f { + ($(pub $({$constness:ident})* fn $i:ident( + $($arg:ident: $argty:ty),* + ) -> $ret:ty { + $($body:stmt);* + })*) => ($( + #[inline] + pub extern fn $i($($arg: $argty),* + ) -> $ret { + $($body);* + } )*) } diff -Nru cargo-0.44.1/vendor/libc/src/psp.rs cargo-0.47.0/vendor/libc/src/psp.rs --- cargo-0.44.1/vendor/libc/src/psp.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/psp.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,8 @@ //! PSP C type definitions +//! +//! These type declarations are not enough, as they must be ultimately resolved +//! by the linker. Crates that use these definitions must, somewhere in the +//! crate graph, include a stub provider crate such as the `psp` crate. pub type c_schar = i8; pub type c_uchar = u8; @@ -23,9 +27,6 @@ pub type c_long = i64; pub type c_ulong = u64; -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; - cfg_if! { if #[cfg(libc_core_cvoid)] { pub use ::ffi::c_void; @@ -45,3 +46,4671 @@ } } } + +pub type SceKernelVTimerHandler = unsafe extern "C" fn( + uid: SceUid, + arg1: *mut SceKernelSysClock, + arg2: *mut SceKernelSysClock, + arg3: *mut c_void, +) -> u32; + +pub type SceKernelVTimerHandlerWide = unsafe extern "C" fn( + uid: SceUid, + arg1: i64, + arg2: i64, + arg3: *mut c_void, +) -> u32; + +pub type SceKernelThreadEventHandler = + unsafe extern "C" fn(mask: i32, thid: SceUid, common: *mut c_void) -> i32; + +pub type SceKernelAlarmHandler = + unsafe extern "C" fn(common: *mut c_void) -> u32; + +pub type SceKernelCallbackFunction = + unsafe extern "C" fn(arg1: i32, arg2: i32, arg: *mut c_void) -> i32; + +pub type SceKernelThreadEntry = + unsafe extern "C" fn(args: usize, argp: *mut c_void) -> i32; + +pub type PowerCallback = extern "C" fn(unknown: i32, power_info: i32); + +pub type IoPermissions = i32; + +pub type UmdCallback = fn(unknown: i32, event: i32) -> i32; + +pub type SceMpegRingbufferCb = ::Option< + unsafe extern "C" fn( + data: *mut c_void, + num_packets: i32, + param: *mut c_void, + ) -> i32, +>; + +pub type GuCallback = ::Option; +pub type GuSwapBuffersCallback = ::Option; + +pub type SceNetAdhocctlHandler = ::Option; + +pub type AdhocMatchingCallback = ::Option< + unsafe extern "C" fn( + matching_id: i32, + event: i32, + mac: *mut u8, + opt_len: i32, + opt_data: *mut c_void, + ), +>; + +pub type SceNetApctlHandler = ::Option< + unsafe extern "C" fn( + oldState: i32, + newState: i32, + event: i32, + error: i32, + pArg: *mut c_void, + ), +>; + +pub type HttpMallocFunction = + ::Option *mut c_void>; +pub type HttpReallocFunction = + ::Option *mut c_void>; +pub type HttpFreeFunction = ::Option; +pub type HttpPasswordCB = ::Option< + unsafe extern "C" fn( + request: i32, + auth_type: HttpAuthType, + realm: *const u8, + username: *mut u8, + password: *mut u8, + need_entity: i32, + entity_body: *mut *mut u8, + entity_size: *mut usize, + save: *mut i32, + ) -> i32, +>; + +pub type socklen_t = u32; + +e! 
{ + #[repr(u32)] + pub enum AudioFormat { + Stereo = 0, + Mono = 0x10, + } + + #[repr(u32)] + pub enum DisplayMode { + Lcd = 0, + } + + #[repr(u32)] + pub enum DisplayPixelFormat { + Psm5650 = 0, + Psm5551 = 1, + Psm4444 = 2, + Psm8888 = 3, + } + + #[repr(u32)] + pub enum DisplaySetBufSync { + Immediate = 0, + NextFrame = 1, + } + + #[repr(i32)] + pub enum AudioOutputFrequency { + Khz48 = 48000, + Khz44_1 = 44100, + Khz32 = 32000, + Khz24 = 24000, + Khz22_05 = 22050, + Khz16 = 16000, + Khz12 = 12000, + Khz11_025 = 11025, + Khz8 = 8000, + } + + #[repr(i32)] + pub enum AudioInputFrequency { + Khz44_1 = 44100, + Khz22_05 = 22050, + Khz11_025 = 11025, + } + + #[repr(u32)] + pub enum CtrlMode { + Digital = 0, + Analog, + } + + #[repr(i32)] + pub enum GeMatrixType { + Bone0 = 0, + Bone1, + Bone2, + Bone3, + Bone4, + Bone5, + Bone6, + Bone7, + World, + View, + Projection, + TexGen, + } + + #[repr(i32)] + pub enum GeListState { + Done = 0, + Queued, + DrawingDone, + StallReached, + CancelDone, + } + + #[repr(u8)] + pub enum GeCommand { + Nop = 0, + Vaddr = 0x1, + Iaddr = 0x2, + Prim = 0x4, + Bezier = 0x5, + Spline = 0x6, + BoundingBox = 0x7, + Jump = 0x8, + BJump = 0x9, + Call = 0xa, + Ret = 0xb, + End = 0xc, + Signal = 0xe, + Finish = 0xf, + Base = 0x10, + VertexType = 0x12, + OffsetAddr = 0x13, + Origin = 0x14, + Region1 = 0x15, + Region2 = 0x16, + LightingEnable = 0x17, + LightEnable0 = 0x18, + LightEnable1 = 0x19, + LightEnable2 = 0x1a, + LightEnable3 = 0x1b, + DepthClampEnable = 0x1c, + CullFaceEnable = 0x1d, + TextureMapEnable = 0x1e, + FogEnable = 0x1f, + DitherEnable = 0x20, + AlphaBlendEnable = 0x21, + AlphaTestEnable = 0x22, + ZTestEnable = 0x23, + StencilTestEnable = 0x24, + AntiAliasEnable = 0x25, + PatchCullEnable = 0x26, + ColorTestEnable = 0x27, + LogicOpEnable = 0x28, + BoneMatrixNumber = 0x2a, + BoneMatrixData = 0x2b, + MorphWeight0 = 0x2c, + MorphWeight1 = 0x2d, + MorphWeight2 = 0x2e, + MorphWeight3 = 0x2f, + MorphWeight4 = 0x30, + MorphWeight5 = 0x31, + MorphWeight6 = 0x32, + MorphWeight7 = 0x33, + PatchDivision = 0x36, + PatchPrimitive = 0x37, + PatchFacing = 0x38, + WorldMatrixNumber = 0x3a, + WorldMatrixData = 0x3b, + ViewMatrixNumber = 0x3c, + ViewMatrixData = 0x3d, + ProjMatrixNumber = 0x3e, + ProjMatrixData = 0x3f, + TGenMatrixNumber = 0x40, + TGenMatrixData = 0x41, + ViewportXScale = 0x42, + ViewportYScale = 0x43, + ViewportZScale = 0x44, + ViewportXCenter = 0x45, + ViewportYCenter = 0x46, + ViewportZCenter = 0x47, + TexScaleU = 0x48, + TexScaleV = 0x49, + TexOffsetU = 0x4a, + TexOffsetV = 0x4b, + OffsetX = 0x4c, + OffsetY = 0x4d, + ShadeMode = 0x50, + ReverseNormal = 0x51, + MaterialUpdate = 0x53, + MaterialEmissive = 0x54, + MaterialAmbient = 0x55, + MaterialDiffuse = 0x56, + MaterialSpecular = 0x57, + MaterialAlpha = 0x58, + MaterialSpecularCoef = 0x5b, + AmbientColor = 0x5c, + AmbientAlpha = 0x5d, + LightMode = 0x5e, + LightType0 = 0x5f, + LightType1 = 0x60, + LightType2 = 0x61, + LightType3 = 0x62, + Light0X = 0x63, + Light0Y, + Light0Z, + Light1X, + Light1Y, + Light1Z, + Light2X, + Light2Y, + Light2Z, + Light3X, + Light3Y, + Light3Z, + Light0DirectionX = 0x6f, + Light0DirectionY, + Light0DirectionZ, + Light1DirectionX, + Light1DirectionY, + Light1DirectionZ, + Light2DirectionX, + Light2DirectionY, + Light2DirectionZ, + Light3DirectionX, + Light3DirectionY, + Light3DirectionZ, + Light0ConstantAtten = 0x7b, + Light0LinearAtten, + Light0QuadtraticAtten, + Light1ConstantAtten, + Light1LinearAtten, + Light1QuadtraticAtten, + Light2ConstantAtten, + Light2LinearAtten, + 
Light2QuadtraticAtten, + Light3ConstantAtten, + Light3LinearAtten, + Light3QuadtraticAtten, + Light0ExponentAtten = 0x87, + Light1ExponentAtten, + Light2ExponentAtten, + Light3ExponentAtten, + Light0CutoffAtten = 0x8b, + Light1CutoffAtten, + Light2CutoffAtten, + Light3CutoffAtten, + Light0Ambient = 0x8f, + Light0Diffuse, + Light0Specular, + Light1Ambient, + Light1Diffuse, + Light1Specular, + Light2Ambient, + Light2Diffuse, + Light2Specular, + Light3Ambient, + Light3Diffuse, + Light3Specular, + Cull = 0x9b, + FrameBufPtr = 0x9c, + FrameBufWidth = 0x9d, + ZBufPtr = 0x9e, + ZBufWidth = 0x9f, + TexAddr0 = 0xa0, + TexAddr1, + TexAddr2, + TexAddr3, + TexAddr4, + TexAddr5, + TexAddr6, + TexAddr7, + TexBufWidth0 = 0xa8, + TexBufWidth1, + TexBufWidth2, + TexBufWidth3, + TexBufWidth4, + TexBufWidth5, + TexBufWidth6, + TexBufWidth7, + ClutAddr = 0xb0, + ClutAddrUpper = 0xb1, + TransferSrc, + TransferSrcW, + TransferDst, + TransferDstW, + TexSize0 = 0xb8, + TexSize1, + TexSize2, + TexSize3, + TexSize4, + TexSize5, + TexSize6, + TexSize7, + TexMapMode = 0xc0, + TexShadeLs = 0xc1, + TexMode = 0xc2, + TexFormat = 0xc3, + LoadClut = 0xc4, + ClutFormat = 0xc5, + TexFilter = 0xc6, + TexWrap = 0xc7, + TexLevel = 0xc8, + TexFunc = 0xc9, + TexEnvColor = 0xca, + TexFlush = 0xcb, + TexSync = 0xcc, + Fog1 = 0xcd, + Fog2 = 0xce, + FogColor = 0xcf, + TexLodSlope = 0xd0, + FramebufPixFormat = 0xd2, + ClearMode = 0xd3, + Scissor1 = 0xd4, + Scissor2 = 0xd5, + MinZ = 0xd6, + MaxZ = 0xd7, + ColorTest = 0xd8, + ColorRef = 0xd9, + ColorTestmask = 0xda, + AlphaTest = 0xdb, + StencilTest = 0xdc, + StencilOp = 0xdd, + ZTest = 0xde, + BlendMode = 0xdf, + BlendFixedA = 0xe0, + BlendFixedB = 0xe1, + Dith0 = 0xe2, + Dith1, + Dith2, + Dith3, + LogicOp = 0xe6, + ZWriteDisable = 0xe7, + MaskRgb = 0xe8, + MaskAlpha = 0xe9, + TransferStart = 0xea, + TransferSrcPos = 0xeb, + TransferDstPos = 0xec, + TransferSize = 0xee, + Vscx = 0xf0, + Vscy = 0xf1, + Vscz = 0xf2, + Vtcs = 0xf3, + Vtct = 0xf4, + Vtcq = 0xf5, + Vcv = 0xf6, + Vap = 0xf7, + Vfc = 0xf8, + Vscv = 0xf9, + + Unknown03 = 0x03, + Unknown0D = 0x0d, + Unknown11 = 0x11, + Unknown29 = 0x29, + Unknown34 = 0x34, + Unknown35 = 0x35, + Unknown39 = 0x39, + Unknown4E = 0x4e, + Unknown4F = 0x4f, + Unknown52 = 0x52, + Unknown59 = 0x59, + Unknown5A = 0x5a, + UnknownB6 = 0xb6, + UnknownB7 = 0xb7, + UnknownD1 = 0xd1, + UnknownED = 0xed, + UnknownEF = 0xef, + UnknownFA = 0xfa, + UnknownFB = 0xfb, + UnknownFC = 0xfc, + UnknownFD = 0xfd, + UnknownFE = 0xfe, + NopFF = 0xff, + } + + #[repr(i32)] + pub enum SceSysMemPartitionId { + SceKernelUnknownPartition = 0, + SceKernelPrimaryKernelPartition = 1, + SceKernelPrimaryUserPartition = 2, + SceKernelOtherKernelPartition1 = 3, + SceKernelOtherKernelPartition2 = 4, + SceKernelVshellPARTITION = 5, + SceKernelScUserPartition = 6, + SceKernelMeUserPartition = 7, + SceKernelExtendedScKernelPartition = 8, + SceKernelExtendedSc2KernelPartition = 9, + SceKernelExtendedMeKernelPartition = 10, + SceKernelVshellKernelPartition = 11, + SceKernelExtendedKernelPartition = 12, + } + + #[repr(i32)] + pub enum SceSysMemBlockTypes { + Low = 0, + High, + Addr, + } + + #[repr(u32)] + pub enum Interrupt { + Gpio = 4, + Ata = 5, + Umd = 6, + Mscm0 = 7, + Wlan = 8, + Audio = 10, + I2c = 12, + Sircs = 14, + Systimer0 = 15, + Systimer1 = 16, + Systimer2 = 17, + Systimer3 = 18, + Thread0 = 19, + Nand = 20, + Dmacplus = 21, + Dma0 = 22, + Dma1 = 23, + Memlmd = 24, + Ge = 25, + Vblank = 30, + Mecodec = 31, + Hpremote = 36, + Mscm1 = 60, + Mscm2 = 61, + Thread1 = 65, + Interrupt 
= 66, + } + + #[repr(u32)] + pub enum SubInterrupt { + Gpio = Interrupt::Gpio as u32, + Ata = Interrupt::Ata as u32, + Umd = Interrupt::Umd as u32, + Dmacplus = Interrupt::Dmacplus as u32, + Ge = Interrupt::Ge as u32, + Display = Interrupt::Vblank as u32, + } + + #[repr(u32)] + pub enum SceKernelIdListType { + Thread = 1, + Semaphore = 2, + EventFlag = 3, + Mbox = 4, + Vpl = 5, + Fpl = 6, + Mpipe = 7, + Callback = 8, + ThreadEventHandler = 9, + Alarm = 10, + VTimer = 11, + SleepThread = 64, + DelayThread = 65, + SuspendThread = 66, + DormantThread = 67, + } + + #[repr(i32)] + pub enum UsbCamResolution { + Px160_120 = 0, + Px176_144 = 1, + Px320_240 = 2, + Px352_288 = 3, + Px640_480 = 4, + Px1024_768 = 5, + Px1280_960 = 6, + Px480_272 = 7, + Px360_272 = 8, + } + + #[repr(i32)] + pub enum UsbCamResolutionEx { + Px160_120 = 0, + Px176_144 = 1, + Px320_240 = 2, + Px352_288 = 3, + Px360_272 = 4, + Px480_272 = 5, + Px640_480 = 6, + Px1024_768 = 7, + Px1280_960 = 8, + } + + #[repr(i32)] + pub enum UsbCamDelay { + NoDelay = 0, + Delay10Sec = 1, + Delay20Sec = 2, + Delay30Sec = 3, + } + + #[repr(i32)] + pub enum UsbCamFrameRate { + Fps3_75 = 0, + Fps5 = 1, + Fps7_5 = 2, + Fps10 = 3, + Fps15 = 4, + Fps20 = 5, + Fps30 = 6, + Fps60 = 7, + } + + #[repr(i32)] + pub enum UsbCamWb { + Auto = 0, + Daylight = 1, + Fluorescent = 2, + Incadescent = 3, + } + + #[repr(i32)] + pub enum UsbCamEffectMode { + Normal = 0, + Negative = 1, + Blackwhite = 2, + Sepia = 3, + Blue = 4, + Red = 5, + Green = 6, + } + + #[repr(i32)] + pub enum UsbCamEvLevel { + Pos2_0 = 0, + Pos1_7 = 1, + Pos1_5 = 2, + Pos1_3 = 3, + Pos1_0 = 4, + Pos0_7 = 5, + Pos0_5 = 6, + Pos0_3 = 7, + Zero = 8, + Neg0_3, + Neg0_5, + Neg0_7, + Neg1_0, + Neg1_3, + Neg1_5, + Neg1_7, + Neg2_0, + } + + #[repr(i32)] + pub enum RtcCheckValidError { + InvalidYear = -1, + InvalidMonth = -2, + InvalidDay = -3, + InvalidHour = -4, + InvalidMinutes = -5, + InvalidSeconds = -6, + InvalidMicroseconds = -7, + } + + #[repr(u32)] + pub enum PowerTick { + All = 0, + Suspend = 1, + Display = 6, + } + + #[repr(u32)] + pub enum IoAssignPerms { + RdWr = 0, + RdOnly = 1, + } + + #[repr(u32)] + pub enum IoWhence { + Set = 0, + Cur = 1, + End = 2, + } + + #[repr(u32)] + pub enum UmdType { + Game = 0x10, + Video = 0x20, + Audio = 0x40, + } + + #[repr(u32)] + pub enum GuPrimitive { + Points = 0, + Lines = 1, + LineStrip = 2, + Triangles = 3, + TriangleStrip = 4, + TriangleFan = 5, + Sprites = 6, + } + + #[repr(u32)] + pub enum PatchPrimitive { + Points = 0, + LineStrip = 2, + TriangleStrip = 4, + } + + #[repr(u32)] + pub enum GuState { + AlphaTest = 0, + DepthTest = 1, + ScissorTest = 2, + StencilTest = 3, + Blend = 4, + CullFace = 5, + Dither = 6, + Fog = 7, + ClipPlanes = 8, + Texture2D = 9, + Lighting = 10, + Light0 = 11, + Light1 = 12, + Light2 = 13, + Light3 = 14, + LineSmooth = 15, + PatchCullFace = 16, + ColorTest = 17, + ColorLogicOp = 18, + FaceNormalReverse = 19, + PatchFace = 20, + Fragment2X = 21, + } + + #[repr(u32)] + pub enum MatrixMode { + Projection = 0, + View = 1, + Model = 2, + Texture = 3, + } + + #[repr(u32)] + pub enum TexturePixelFormat { + Psm5650 = 0, + Psm5551 = 1, + Psm4444 = 2, + Psm8888 = 3, + PsmT4 = 4, + PsmT8 = 5, + PsmT16 = 6, + PsmT32 = 7, + PsmDxt1 = 8, + PsmDxt3 = 9, + PsmDxt5 = 10, + } + + #[repr(u32)] + pub enum SplineMode { + FillFill = 0, + OpenFill = 1, + FillOpen = 2, + OpenOpen = 3, + } + + #[repr(u32)] + pub enum ShadingModel { + Flat = 0, + Smooth = 1, + } + + #[repr(u32)] + pub enum LogicalOperation { + Clear = 0, + And = 1, + 
AndReverse = 2, + Copy = 3, + AndInverted = 4, + Noop = 5, + Xor = 6, + Or = 7, + Nor = 8, + Equiv = 9, + Inverted = 10, + OrReverse = 11, + CopyInverted = 12, + OrInverted = 13, + Nand = 14, + Set = 15, + } + + #[repr(u32)] + pub enum TextureFilter { + Nearest = 0, + Linear = 1, + NearestMipmapNearest = 4, + LinearMipmapNearest = 5, + NearestMipmapLinear = 6, + LinearMipmapLinear = 7, + } + + #[repr(u32)] + pub enum TextureMapMode { + TextureCoords = 0, + TextureMatrix = 1, + EnvironmentMap = 2, + } + + #[repr(u32)] + pub enum TextureLevelMode { + Auto = 0, + Const = 1, + Slope = 2, + } + + #[repr(u32)] + pub enum TextureProjectionMapMode { + Position = 0, + Uv = 1, + NormalizedNormal = 2, + Normal = 3, + } + + #[repr(u32)] + pub enum GuTexWrapMode { + Repeat = 0, + Clamp = 1, + } + + #[repr(u32)] + pub enum FrontFaceDirection { + Clockwise = 0, + CounterClockwise = 1, + } + + #[repr(u32)] + pub enum AlphaFunc { + Never = 0, + Always, + Equal, + NotEqual, + Less, + LessOrEqual, + Greater, + GreaterOrEqual, + } + + #[repr(u32)] + pub enum StencilFunc { + Never = 0, + Always, + Equal, + NotEqual, + Less, + LessOrEqual, + Greater, + GreaterOrEqual, + } + + #[repr(u32)] + pub enum ColorFunc { + Never = 0, + Always, + Equal, + NotEqual, + } + + #[repr(u32)] + pub enum DepthFunc { + Never = 0, + Always, + Equal, + NotEqual, + Less, + LessOrEqual, + Greater, + GreaterOrEqual, + } + + #[repr(u32)] + pub enum TextureEffect { + Modulate = 0, + Decal = 1, + Blend = 2, + Replace = 3, + Add = 4, + } + + #[repr(u32)] + pub enum TextureColorComponent { + Rgb = 0, + Rgba = 1, + } + + #[repr(u32)] + pub enum MipmapLevel { + None = 0, + Level1, + Level2, + Level3, + Level4, + Level5, + Level6, + Level7, + } + + #[repr(u32)] + pub enum BlendOp { + Add = 0, + Subtract = 1, + ReverseSubtract = 2, + Min = 3, + Max = 4, + Abs = 5, + } + + #[repr(u32)] + pub enum BlendSrc { + SrcColor = 0, + OneMinusSrcColor = 1, + SrcAlpha = 2, + OneMinusSrcAlpha = 3, + Fix = 10, + } + + #[repr(u32)] + pub enum BlendDst { + DstColor = 0, + OneMinusDstColor = 1, + DstAlpha = 4, + OneMinusDstAlpha = 5, + Fix = 10, + } + + #[repr(u32)] + pub enum StencilOperation { + Keep = 0, + Zero = 1, + Replace = 2, + Invert = 3, + Incr = 4, + Decr = 5, + } + + #[repr(u32)] + pub enum LightMode { + SingleColor = 0, + SeparateSpecularColor = 1, + } + + #[repr(u32)] + pub enum LightType { + Directional = 0, + Pointlight = 1, + Spotlight = 2, + } + + #[repr(u32)] + pub enum GuContextType { + Direct = 0, + Call = 1, + Send = 2, + } + + #[repr(u32)] + pub enum GuQueueMode { + Tail = 0, + Head = 1, + } + + #[repr(u32)] + pub enum GuSyncMode { + Finish = 0, + Signal = 1, + Done = 2, + List = 3, + Send = 4, + } + + #[repr(u32)] + pub enum GuSyncBehavior { + Wait = 0, + NoWait = 1, + } + + #[repr(u32)] + pub enum GuCallbackId { + Signal = 1, + Finish = 4, + } + + #[repr(u32)] + pub enum SignalBehavior { + Suspend = 1, + Continue = 2, + } + + #[repr(u32)] + pub enum ClutPixelFormat { + Psm5650 = 0, + Psm5551 = 1, + Psm4444 = 2, + Psm8888 = 3, + } + + #[repr(C)] + pub enum KeyType { + Directory = 1, + Integer = 2, + String = 3, + Bytes = 4, + } + + #[repr(u32)] + pub enum UtilityMsgDialogMode { + Error, + Text, + } + + #[repr(u32)] + pub enum UtilityMsgDialogPressed { + Unknown1, + Yes, + No, + Back, + } + + #[repr(u32)] + pub enum UtilityDialogButtonAccept { + Circle, + Cross, + } + + #[repr(u32)] + pub enum SceUtilityOskInputLanguage { + Default, + Japanese, + English, + French, + Spanish, + German, + Italian, + Dutch, + Portugese, + Russian, + Korean, 
+ } + + #[repr(u32)] + pub enum SceUtilityOskInputType { + All, + LatinDigit, + LatinSymbol, + LatinLowercase = 4, + LatinUppercase = 8, + JapaneseDigit = 0x100, + JapaneseSymbol = 0x200, + JapaneseLowercase = 0x400, + JapaneseUppercase = 0x800, + JapaneseHiragana = 0x1000, + JapaneseHalfWidthKatakana = 0x2000, + JapaneseKatakana = 0x4000, + JapaneseKanji = 0x8000, + RussianLowercase = 0x10000, + RussianUppercase = 0x20000, + Korean = 0x40000, + Url = 0x80000, + } + + #[repr(u32)] + pub enum SceUtilityOskState { + None, + Initializing, + Initialized, + Visible, + Quit, + Finished, + } + + #[repr(u32)] + pub enum SceUtilityOskResult { + Unchanged, + Cancelled, + Changed, + } + + #[repr(u32)] + pub enum SystemParamLanguage { + Japanese, + English, + French, + Spanish, + German, + Italian, + Dutch, + Portugese, + Russian, + Korean, + ChineseTraditional, + ChineseSimplified, + } + + #[repr(u32)] + pub enum SystemParamId { + StringNickname = 1, + AdhocChannel, + WlanPowerSave, + DateFormat, + TimeFormat, + Timezone, + DaylightSavings, + Language, + Unknown, + } + + #[repr(u32)] + pub enum SystemParamAdhocChannel { + ChannelAutomatic = 0, + Channel1 = 1, + Channel6 = 6, + Channel11 = 11, + } + + #[repr(u32)] + pub enum SystemParamWlanPowerSaveState { + Off, + On, + } + + #[repr(u32)] + pub enum SystemParamDateFormat { + YYYYMMDD, + MMDDYYYY, + DDMMYYYY, + } + + #[repr(u32)] + pub enum SystemParamTimeFormat { + Hour24, + Hour12, + } + + #[repr(u32)] + pub enum SystemParamDaylightSavings { + Std, + Dst, + } + + #[repr(u32)] + pub enum AvModule { + AvCodec, + SasCore, + Atrac3Plus, + MpegBase, + Mp3, + Vaudio, + Aac, + G729, + } + + #[repr(u32)] + pub enum Module { + NetCommon = 0x100, + NetAdhoc, + NetInet, + NetParseUri, + NetHttp, + NetSsl, + + UsbPspCm = 0x200, + UsbMic, + UsbCam, + UsbGps, + + AvCodec = 0x300, + AvSascore, + AvAtrac3Plus, + AvMpegBase, + AvMp3, + AvVaudio, + AvAac, + AvG729, + + NpCommon = 0x400, + NpService, + NpMatching2, + NpDrm = 0x500, + + Irda = 0x600, + } + + #[repr(u32)] + pub enum NetModule { + NetCommon = 1, + NetAdhoc, + NetInet, + NetParseUri, + NetHttp, + NetSsl, + } + + #[repr(u32)] + pub enum UsbModule { + UsbPspCm = 1, + UsbAcc, + UsbMic, + UsbCam, + UsbGps, + } + + #[repr(u32)] + pub enum NetParam { + Name, + Ssid, + Secure, + WepKey, + IsStaticIp, + Ip, + NetMask, + Route, + ManualDns, + PrimaryDns, + SecondaryDns, + ProxyUser, + ProxyPass, + UseProxy, + ProxyServer, + ProxyPort, + Unknown1, + Unknown2, + } + + #[repr(u32)] + pub enum UtilityNetconfAction { + ConnectAP, + DisplayStatus, + ConnectAdhoc, + } + + #[repr(u32)] + pub enum UtilitySavedataMode { + AutoLoad, + AutoSave, + Load, + Save, + ListLoad, + ListSave, + ListDelete, + Delete, + } + + #[repr(u32)] + pub enum UtilitySavedataFocus { + Unknown1, + FirstList, + LastList, + Latest, + Oldest, + Unknown2, + Unknown3, + FirstEmpty, + LastEmpty, + } + + #[repr(u32)] + pub enum UtilityGameSharingMode { + Single = 1, + Multiple, + } + + #[repr(u32)] + pub enum UtilityGameSharingDataType { + File = 1, + Memory, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerInterfaceMode { + Full, + Limited, + None, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerCookieMode { + Disabled = 0, + Enabled, + Confirm, + Default, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerTextSize { + Large, + Normal, + Small, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerDisplayMode { + Normal, + Fit, + SmartFit, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerConnectMode { + Last, + ManualOnce, + ManualAll, + } + + #[repr(u32)] + 
pub enum UtilityHtmlViewerDisconnectMode { + Enable, + Disable, + Confirm, + } + + #[repr(u32)] + pub enum ScePspnetAdhocPtpState { + Closed, + Listen, + SynSent, + SynReceived, + Established, + } + + #[repr(u32)] + pub enum AdhocMatchingMode { + Host = 1, + Client, + Ptp, + } + + #[repr(u32)] + pub enum ApctlState { + Disconnected, + Scanning, + Joining, + GettingIp, + GotIp, + EapAuth, + KeyExchange, + } + + #[repr(u32)] + pub enum ApctlEvent { + ConnectRequest, + ScanRequest, + ScanComplete, + Established, + GetIp, + DisconnectRequest, + Error, + Info, + EapAuth, + KeyExchange, + Reconnect, + } + + #[repr(u32)] + pub enum ApctlInfo { + ProfileName, + Bssid, + Ssid, + SsidLength, + SecurityType, + Strength, + Channel, + PowerSave, + Ip, + SubnetMask, + Gateway, + PrimaryDns, + SecondaryDns, + UseProxy, + ProxyUrl, + ProxyPort, + EapType, + StartBrowser, + Wifisp, + } + + #[repr(u32)] + pub enum ApctlInfoSecurityType { + None, + Wep, + Wpa, + } + + #[repr(u32)] + pub enum HttpMethod { + Get, + Post, + Head, + } + + #[repr(u32)] + pub enum HttpAuthType { + Basic, + Digest, + } +} + +s_paren! { + #[repr(transparent)] + pub struct SceUid(pub i32); + + #[repr(transparent)] + pub struct SceMpeg(*mut *mut c_void); + + #[repr(transparent)] + pub struct SceMpegStream(*mut c_void); + + #[repr(transparent)] + pub struct Mp3Handle(pub i32); + + #[repr(transparent)] + pub struct RegHandle(u32); +} + +s! { + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: u8, + pub sa_data: [u8;14], + } + + pub struct in_addr { + pub s_addr: u32, + } + + pub struct AudioInputParams { + pub unknown1: i32, + pub gain: i32, + pub unknown2: i32, + pub unknown3: i32, + pub unknown4: i32, + pub unknown5: i32, + } + + pub struct Atrac3BufferInfo { + pub puc_write_position_first_buf: *mut u8, + pub ui_writable_byte_first_buf: u32, + pub ui_min_write_byte_first_buf: u32, + pub ui_read_position_first_buf: u32, + pub puc_write_position_second_buf: *mut u8, + pub ui_writable_byte_second_buf: u32, + pub ui_min_write_byte_second_buf: u32, + pub ui_read_position_second_buf: u32, + } + + pub struct SceCtrlData { + pub timestamp: u32, + pub buttons: i32, + pub lx: u8, + pub ly: u8, + pub rsrv: [u8; 6], + } + + pub struct SceCtrlLatch { + pub ui_make: u32, + pub ui_break: u32, + pub ui_press: u32, + pub ui_release: u32, + } + + pub struct GeStack { + pub stack: [u32; 8], + } + + pub struct GeCallbackData { + pub signal_func: ::Option, + pub signal_arg: *mut c_void, + pub finish_func: ::Option, + pub finish_arg: *mut c_void, + } + + pub struct GeListArgs { + pub size: u32, + pub context: *mut GeContext, + pub num_stacks: u32, + pub stacks: *mut GeStack, + } + + pub struct GeBreakParam { + pub buf: [u32; 4], + } + + pub struct SceKernelLoadExecParam { + pub size: usize, + pub args: usize, + pub argp: *mut c_void, + pub key: *const u8, + } + + pub struct timeval { + pub tv_sec: i32, + pub tv_usec: i32, + } + + pub struct timezone { + pub tz_minutes_west: i32, + pub tz_dst_time: i32, + } + + pub struct IntrHandlerOptionParam { + size: i32, + entry: u32, + common: u32, + gp: u32, + intr_code: u16, + sub_count: u16, + intr_level: u16, + enabled: u16, + calls: u32, + field_1c: u32, + total_clock_lo: u32, + total_clock_hi: u32, + min_clock_lo: u32, + min_clock_hi: u32, + max_clock_lo: u32, + max_clock_hi: u32, + } + + pub struct SceKernelLMOption { + pub size: usize, + pub m_pid_text: SceUid, + pub m_pid_data: SceUid, + pub flags: u32, + pub position: u8, + pub access: u8, + pub c_reserved: [u8; 2usize], + } + + pub struct 
SceKernelSMOption { + pub size: usize, + pub m_pid_stack: SceUid, + pub stack_size: usize, + pub priority: i32, + pub attribute: u32, + } + + pub struct SceKernelModuleInfo { + pub size: usize, + pub n_segment: u8, + pub reserved: [u8; 3usize], + pub segment_addr: [i32; 4usize], + pub segment_size: [i32; 4usize], + pub entry_addr: u32, + pub gp_value: u32, + pub text_addr: u32, + pub text_size: u32, + pub data_size: u32, + pub bss_size: u32, + pub attribute: u16, + pub version: [u8; 2usize], + pub name: [u8; 28usize], + } + + pub struct DebugProfilerRegs { + pub enable: u32, + pub systemck: u32, + pub cpuck: u32, + pub internal: u32, + pub memory: u32, + pub copz: u32, + pub vfpu: u32, + pub sleep: u32, + pub bus_access: u32, + pub uncached_load: u32, + pub uncached_store: u32, + pub cached_load: u32, + pub cached_store: u32, + pub i_miss: u32, + pub d_miss: u32, + pub d_writeback: u32, + pub cop0_inst: u32, + pub fpu_inst: u32, + pub vfpu_inst: u32, + pub local_bus: u32, + } + + pub struct SceKernelSysClock { + pub low: u32, + pub hi: u32, + } + + pub struct SceKernelThreadOptParam { + pub size: usize, + pub stack_mpid: SceUid, + } + + pub struct SceKernelThreadInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub status: i32, + pub entry: SceKernelThreadEntry, + pub stack: *mut c_void, + pub stack_size: i32, + pub gp_reg: *mut c_void, + pub init_priority: i32, + pub current_priority: i32, + pub wait_type: i32, + pub wait_id: SceUid, + pub wakeup_count: i32, + pub exit_status: i32, + pub run_clocks: SceKernelSysClock, + pub intr_preempt_count: u32, + pub thread_preempt_count: u32, + pub release_count: u32, + } + + pub struct SceKernelThreadRunStatus { + pub size: usize, + pub status: i32, + pub current_priority: i32, + pub wait_type: i32, + pub wait_id: i32, + pub wakeup_count: i32, + pub run_clocks: SceKernelSysClock, + pub intr_preempt_count: u32, + pub thread_preempt_count: u32, + pub release_count: u32, + } + + pub struct SceKernelSemaOptParam { + pub size: usize, + } + + pub struct SceKernelSemaInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub init_count: i32, + pub current_count: i32, + pub max_count: i32, + pub num_wait_threads: i32, + } + + pub struct SceKernelEventFlagInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub init_pattern: u32, + pub current_pattern: u32, + pub num_wait_threads: i32, + } + + pub struct SceKernelEventFlagOptParam { + pub size: usize, + } + + pub struct SceKernelMbxOptParam { + pub size: usize, + } + + pub struct SceKernelMbxInfo { + pub size: usize, + pub name: [u8; 32usize], + pub attr: u32, + pub num_wait_threads: i32, + pub num_messages: i32, + pub first_message: *mut c_void, + } + + pub struct SceKernelVTimerInfo { + pub size: usize, + pub name: [u8; 32], + pub active: i32, + pub base: SceKernelSysClock, + pub current: SceKernelSysClock, + pub schedule: SceKernelSysClock, + pub handler: SceKernelVTimerHandler, + pub common: *mut c_void, + } + + pub struct SceKernelThreadEventHandlerInfo { + pub size: usize, + pub name: [u8; 32], + pub thread_id: SceUid, + pub mask: i32, + pub handler: SceKernelThreadEventHandler, + pub common: *mut c_void, + } + + pub struct SceKernelAlarmInfo { + pub size: usize, + pub schedule: SceKernelSysClock, + pub handler: SceKernelAlarmHandler, + pub common: *mut c_void, + } + + pub struct SceKernelSystemStatus { + pub size: usize, + pub status: u32, + pub idle_clocks: SceKernelSysClock, + pub comes_out_of_idle_count: u32, + pub thread_switch_count: u32, + pub 
vfpu_switch_count: u32, + } + + pub struct SceKernelMppInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub buf_size: i32, + pub free_size: i32, + pub num_send_wait_threads: i32, + pub num_receive_wait_threads: i32, + } + + pub struct SceKernelVplOptParam { + pub size: usize, + } + + pub struct SceKernelVplInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub pool_size: i32, + pub free_size: i32, + pub num_wait_threads: i32, + } + + pub struct SceKernelFplOptParam { + pub size: usize, + } + + pub struct SceKernelFplInfo { + pub size: usize, + pub name: [u8; 32usize], + pub attr: u32, + pub block_size: i32, + pub num_blocks: i32, + pub free_blocks: i32, + pub num_wait_threads: i32, + } + + pub struct SceKernelVTimerOptParam { + pub size: usize, + } + + pub struct SceKernelCallbackInfo { + pub size: usize, + pub name: [u8; 32usize], + pub thread_id: SceUid, + pub callback: SceKernelCallbackFunction, + pub common: *mut c_void, + pub notify_count: i32, + pub notify_arg: i32, + } + + pub struct UsbCamSetupStillParam { + pub size: i32, + pub resolution: UsbCamResolution, + pub jpeg_size: i32, + pub reverse_flags: i32, + pub delay: UsbCamDelay, + pub comp_level: i32, + } + + pub struct UsbCamSetupStillExParam { + pub size: i32, + pub unk: u32, + pub resolution: UsbCamResolutionEx, + pub jpeg_size: i32, + pub comp_level: i32, + pub unk2: u32, + pub unk3: u32, + pub flip: i32, + pub mirror: i32, + pub delay: UsbCamDelay, + pub unk4: [u32; 5usize], + } + + pub struct UsbCamSetupVideoParam { + pub size: i32, + pub resolution: UsbCamResolution, + pub framerate: UsbCamFrameRate, + pub white_balance: UsbCamWb, + pub saturation: i32, + pub brightness: i32, + pub contrast: i32, + pub sharpness: i32, + pub effect_mode: UsbCamEffectMode, + pub frame_size: i32, + pub unk: u32, + pub evl_evel: UsbCamEvLevel, + } + + pub struct UsbCamSetupVideoExParam { + pub size: i32, + pub unk: u32, + pub resolution: UsbCamResolutionEx, + pub framerate: UsbCamFrameRate, + pub unk2: u32, + pub unk3: u32, + pub white_balance: UsbCamWb, + pub saturation: i32, + pub brightness: i32, + pub contrast: i32, + pub sharpness: i32, + pub unk4: u32, + pub unk5: u32, + pub unk6: [u32; 3usize], + pub effect_mode: UsbCamEffectMode, + pub unk7: u32, + pub unk8: u32, + pub unk9: u32, + pub unk10: u32, + pub unk11: u32, + pub frame_size: i32, + pub unk12: u32, + pub ev_level: UsbCamEvLevel, + } + + pub struct ScePspDateTime { + pub year: u16, + pub month: u16, + pub day: u16, + pub hour: u16, + pub minutes: u16, + pub seconds: u16, + pub microseconds: u32, + } + + pub struct SceIoStat { + pub st_mode: i32, + pub st_attr: i32, + pub st_size: i64, + pub st_ctime: ScePspDateTime, + pub st_atime: ScePspDateTime, + pub st_mtime: ScePspDateTime, + pub st_private: [u32; 6usize], + } + + pub struct UmdInfo { + pub size: u32, + pub type_: UmdType, + } + + pub struct SceMpegRingbuffer { + pub packets: i32, + pub unk0: u32, + pub unk1: u32, + pub unk2: u32, + pub unk3: u32, + pub data: *mut c_void, + pub callback: SceMpegRingbufferCb, + pub cb_param: *mut c_void, + pub unk4: u32, + pub unk5: u32, + pub sce_mpeg: *mut c_void, + } + + pub struct SceMpegAu { + pub pts_msb: u32, + pub pts: u32, + pub dts_msb: u32, + pub dts: u32, + pub es_buffer: u32, + pub au_size: u32, + } + + pub struct SceMpegAvcMode { + pub unk0: i32, + pub pixel_format: super::DisplayPixelFormat, + } + + #[repr(align(64))] + pub struct SceMpegLLI { + pub src: *mut c_void, + pub dst: *mut c_void, + pub next: *mut c_void, + pub size: i32, + } + + 
#[repr(align(64))] + pub struct SceMpegYCrCbBuffer { + pub frame_buffer_height16: i32, + pub frame_buffer_width16: i32, + pub unknown: i32, + pub unknown2: i32, + pub y_buffer: *mut c_void, + pub y_buffer2: *mut c_void, + pub cr_buffer: *mut c_void, + pub cb_buffer: *mut c_void, + pub cr_buffer2: *mut c_void, + pub cb_buffer2: *mut c_void, + + pub frame_height: i32, + pub frame_width: i32, + pub frame_buffer_width: i32, + pub unknown3: [i32; 11usize], + } + + pub struct ScePspSRect { + pub x: i16, + pub y: i16, + pub w: i16, + pub h: i16, + } + + pub struct ScePspIRect { + pub x: i32, + pub y: i32, + pub w: i32, + pub h: i32, + } + + pub struct ScePspL64Rect { + pub x: u64, + pub y: u64, + pub w: u64, + pub h: u64, + } + + pub struct ScePspSVector2 { + pub x: i16, + pub y: i16, + } + + pub struct ScePspIVector2 { + pub x: i32, + pub y: i32, + } + + pub struct ScePspL64Vector2 { + pub x: u64, + pub y: u64, + } + + pub struct ScePspSVector3 { + pub x: i16, + pub y: i16, + pub z: i16, + } + + pub struct ScePspIVector3 { + pub x: i32, + pub y: i32, + pub z: i32, + } + + pub struct ScePspL64Vector3 { + pub x: u64, + pub y: u64, + pub z: u64, + } + + pub struct ScePspSVector4 { + pub x: i16, + pub y: i16, + pub z: i16, + pub w: i16, + } + + pub struct ScePspIVector4 { + pub x: i32, + pub y: i32, + pub z: i32, + pub w: i32, + } + + pub struct ScePspL64Vector4 { + pub x: u64, + pub y: u64, + pub z: u64, + pub w: u64, + } + + pub struct ScePspIMatrix2 { + pub x: ScePspIVector2, + pub y: ScePspIVector2, + } + + pub struct ScePspIMatrix3 { + pub x: ScePspIVector3, + pub y: ScePspIVector3, + pub z: ScePspIVector3, + } + + #[repr(align(16))] + pub struct ScePspIMatrix4 { + pub x: ScePspIVector4, + pub y: ScePspIVector4, + pub z: ScePspIVector4, + pub w: ScePspIVector4, + } + + pub struct ScePspIMatrix4Unaligned { + pub x: ScePspIVector4, + pub y: ScePspIVector4, + pub z: ScePspIVector4, + pub w: ScePspIVector4, + } + + pub struct SceMp3InitArg { + pub mp3_stream_start: u32, + pub unk1: u32, + pub mp3_stream_end: u32, + pub unk2: u32, + pub mp3_buf: *mut c_void, + pub mp3_buf_size: i32, + pub pcm_buf: *mut c_void, + pub pcm_buf_size: i32, + } + + pub struct OpenPSID { + pub data: [u8; 16usize], + } + + pub struct UtilityDialogCommon { + pub size: u32, + pub language: SystemParamLanguage, + pub button_accept: UtilityDialogButtonAccept, + pub graphics_thread: i32, + pub access_thread: i32, + pub font_thread: i32, + pub sound_thread: i32, + pub result: i32, + pub reserved: [i32; 4usize], + } + + pub struct UtilityNetconfAdhoc { + pub name: [u8; 8usize], + pub timeout: u32, + } + + pub struct UtilityNetconfData { + pub base: UtilityDialogCommon, + pub action: UtilityNetconfAction, + pub adhocparam: *mut UtilityNetconfAdhoc, + pub hotspot: i32, + pub hotspot_connected: i32, + pub wifisp: i32, + } + + pub struct UtilitySavedataFileData { + pub buf: *mut c_void, + pub buf_size: usize, + pub size: usize, + pub unknown: i32, + } + + pub struct UtilitySavedataListSaveNewData { + pub icon0: UtilitySavedataFileData, + pub title: *mut u8, + } + + pub struct UtilityGameSharingParams { + pub base: UtilityDialogCommon, + pub unknown1: i32, + pub unknown2: i32, + pub name: [u8; 8usize], + pub unknown3: i32, + pub unknown4: i32, + pub unknown5: i32, + pub result: i32, + pub filepath: *mut u8, + pub mode: UtilityGameSharingMode, + pub datatype: UtilityGameSharingDataType, + pub data: *mut c_void, + pub datasize: u32, + } + + pub struct UtilityHtmlViewerParam { + pub base: UtilityDialogCommon, + pub memaddr: *mut c_void, + 
pub memsize: u32, + pub unknown1: i32, + pub unknown2: i32, + pub initialurl: *mut u8, + pub numtabs: u32, + pub interfacemode: UtilityHtmlViewerInterfaceMode, + pub options: i32, + pub dldirname: *mut u8, + pub dlfilename: *mut u8, + pub uldirname: *mut u8, + pub ulfilename: *mut u8, + pub cookiemode: UtilityHtmlViewerCookieMode, + pub unknown3: u32, + pub homeurl: *mut u8, + pub textsize: UtilityHtmlViewerTextSize, + pub displaymode: UtilityHtmlViewerDisplayMode, + pub connectmode: UtilityHtmlViewerConnectMode, + pub disconnectmode: UtilityHtmlViewerDisconnectMode, + pub memused: u32, + pub unknown4: [i32; 10usize], + } + + pub struct SceUtilityOskData { + pub unk_00: i32, + pub unk_04: i32, + pub language: SceUtilityOskInputLanguage, + pub unk_12: i32, + pub inputtype: SceUtilityOskInputType, + pub lines: i32, + pub unk_24: i32, + pub desc: *mut u16, + pub intext: *mut u16, + pub outtextlength: i32, + pub outtext: *mut u16, + pub result: SceUtilityOskResult, + pub outtextlimit: i32, + } + + pub struct SceUtilityOskParams { + pub base: UtilityDialogCommon, + pub datacount: i32, + pub data: *mut SceUtilityOskData, + pub state: SceUtilityOskState, + pub unk_60: i32, + } + + pub struct SceNetMallocStat { + pub pool: i32, + pub maximum: i32, + pub free: i32, + } + + pub struct SceNetAdhocctlAdhocId { + pub unknown: i32, + pub adhoc_id: [u8; 9usize], + pub unk: [u8; 3usize], + } + + pub struct SceNetAdhocctlScanInfo { + pub next: *mut SceNetAdhocctlScanInfo, + pub channel: i32, + pub name: [u8; 8usize], + pub bssid: [u8; 6usize], + pub unknown: [u8; 2usize], + pub unknown2: i32, + } + + pub struct SceNetAdhocctlGameModeInfo { + pub count: i32, + pub macs: [[u8; 6usize]; 16usize], + } + + pub struct SceNetAdhocPtpStat { + pub next: *mut SceNetAdhocPtpStat, + pub ptp_id: i32, + pub mac: [u8; 6usize], + pub peermac: [u8; 6usize], + pub port: u16, + pub peerport: u16, + pub sent_data: u32, + pub rcvd_data: u32, + pub state: ScePspnetAdhocPtpState, + } + + pub struct SceNetAdhocPdpStat { + pub next: *mut SceNetAdhocPdpStat, + pub pdp_id: i32, + pub mac: [u8; 6usize], + pub port: u16, + pub rcvd_data: u32, + } + + pub struct AdhocPoolStat { + pub size: i32, + pub maxsize: i32, + pub freesize: i32, + } +} + +s_no_extra_traits! 
{ + #[allow(missing_debug_implementations)] + pub struct GeContext { + pub context: [u32; 512], + } + + #[allow(missing_debug_implementations)] + pub struct SceKernelUtilsSha1Context { + pub h: [u32; 5usize], + pub us_remains: u16, + pub us_computed: u16, + pub ull_total_len: u64, + pub buf: [u8; 64usize], + } + + #[allow(missing_debug_implementations)] + pub struct SceKernelUtilsMt19937Context { + pub count: u32, + pub state: [u32; 624usize], + } + + #[allow(missing_debug_implementations)] + pub struct SceKernelUtilsMd5Context { + pub h: [u32; 4usize], + pub pad: u32, + pub us_remains: u16, + pub us_computed: u16, + pub ull_total_len: u64, + pub buf: [u8; 64usize], + } + + #[allow(missing_debug_implementations)] + pub struct SceIoDirent { + pub d_stat: SceIoStat, + pub d_name: [u8; 256usize], + pub d_private: *mut c_void, + pub dummy: i32, + } + + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFRect { + pub x: f32, + pub y: f32, + pub w: f32, + pub h: f32, + } + + #[repr(align(16))] + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFVector3 { + pub x: f32, + pub y: f32, + pub z: f32, + } + + #[repr(align(16))] + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFVector4 { + pub x: f32, + pub y: f32, + pub z: f32, + pub w: f32, + } + + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFVector4Unaligned { + pub x: f32, + pub y: f32, + pub z: f32, + pub w: f32, + } + + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFVector2 { + pub x: f32, + pub y: f32, + } + + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFMatrix2 { + pub x: ScePspFVector2, + pub y: ScePspFVector2, + } + + #[cfg_attr(feature = "extra_traits", derive(Debug))] + pub struct ScePspFMatrix3 { + pub x: ScePspFVector3, + pub y: ScePspFVector3, + pub z: ScePspFVector3, + } + + #[cfg_attr(feature = "extra_traits", derive(Debug))] + #[repr(align(16))] + pub struct ScePspFMatrix4 { + pub x: ScePspFVector4, + pub y: ScePspFVector4, + pub z: ScePspFVector4, + pub w: ScePspFVector4, + } + + #[allow(missing_debug_implementations)] + pub struct ScePspFMatrix4Unaligned { + pub x: ScePspFVector4, + pub y: ScePspFVector4, + pub z: ScePspFVector4, + pub w: ScePspFVector4, + } + + #[allow(missing_debug_implementations)] + pub union ScePspVector3 { + pub fv: ScePspFVector3, + pub iv: ScePspIVector3, + pub f: [f32; 3usize], + pub i: [i32; 3usize], + } + + #[allow(missing_debug_implementations)] + pub union ScePspVector4 { + pub fv: ScePspFVector4, + pub iv: ScePspIVector4, + pub qw: u128, + pub f: [f32; 4usize], + pub i: [i32; 4usize], + } + + #[allow(missing_debug_implementations)] + pub union ScePspMatrix2 { + pub fm: ScePspFMatrix2, + pub im: ScePspIMatrix2, + pub fv: [ScePspFVector2; 2usize], + pub iv: [ScePspIVector2; 2usize], + pub v: [ScePspVector2; 2usize], + pub f: [[f32; 2usize]; 2usize], + pub i: [[i32; 2usize]; 2usize], + } + + #[allow(missing_debug_implementations)] + pub union ScePspMatrix3 { + pub fm: ScePspFMatrix3, + pub im: ScePspIMatrix3, + pub fv: [ScePspFVector3; 3usize], + pub iv: [ScePspIVector3; 3usize], + pub v: [ScePspVector3; 3usize], + pub f: [[f32; 3usize]; 3usize], + pub i: [[i32; 3usize]; 3usize], + } + + #[allow(missing_debug_implementations)] + pub union ScePspVector2 { + pub fv: ScePspFVector2, + pub iv: ScePspIVector2, + pub f: [f32; 2usize], + pub i: [i32; 2usize], + } + + #[allow(missing_debug_implementations)] + pub union ScePspMatrix4 { + pub fm: ScePspFMatrix4, + 
pub im: ScePspIMatrix4, + pub fv: [ScePspFVector4; 4usize], + pub iv: [ScePspIVector4; 4usize], + pub v: [ScePspVector4; 4usize], + pub f: [[f32; 4usize]; 4usize], + pub i: [[i32; 4usize]; 4usize], + } + + #[allow(missing_debug_implementations)] + pub struct Key { + pub key_type: KeyType, + pub name: [u8; 256usize], + pub name_len: u32, + pub unk2: u32, + pub unk3: u32, + } + + #[allow(missing_debug_implementations)] + pub struct UtilityMsgDialogParams { + pub base: UtilityDialogCommon, + pub unknown: i32, + pub mode: UtilityMsgDialogMode, + pub error_value: u32, + pub message: [u8; 512usize], + pub options: i32, + pub button_pressed: UtilityMsgDialogPressed, + } + + #[allow(missing_debug_implementations)] + pub union UtilityNetData { + pub as_uint: u32, + pub as_string: [u8; 128usize], + } + + #[allow(missing_debug_implementations)] + pub struct UtilitySavedataSFOParam { + pub title: [u8; 128usize], + pub savedata_title: [u8; 128usize], + pub detail: [u8; 1024usize], + pub parental_level: u8, + pub unknown: [u8; 3usize], + } + + #[allow(missing_debug_implementations)] + pub struct SceUtilitySavedataParam { + pub base: UtilityDialogCommon, + pub mode: UtilitySavedataMode, + pub unknown1: i32, + pub overwrite: i32, + pub game_name: [u8; 13usize], + pub reserved: [u8; 3usize], + pub save_name: [u8; 20usize], + pub save_name_list: *mut [u8; 20usize], + pub file_name: [u8; 13usize], + pub reserved1: [u8; 3usize], + pub data_buf: *mut c_void, + pub data_buf_size: usize, + pub data_size: usize, + pub sfo_param: UtilitySavedataSFOParam, + pub icon0_file_data: UtilitySavedataFileData, + pub icon1_file_data: UtilitySavedataFileData, + pub pic1_file_data: UtilitySavedataFileData, + pub snd0_file_data: UtilitySavedataFileData, + pub new_data: *mut UtilitySavedataListSaveNewData, + pub focus: UtilitySavedataFocus, + pub unknown2: [i32; 4usize], + pub key: [u8; 16], + pub unknown3: [u8; 20], + } + + #[allow(missing_debug_implementations)] + pub struct SceNetAdhocctlPeerInfo { + pub next: *mut SceNetAdhocctlPeerInfo, + pub nickname: [u8; 128usize], + pub mac: [u8; 6usize], + pub unknown: [u8; 6usize], + pub timestamp: u32, + } + + #[allow(missing_debug_implementations)] + pub struct SceNetAdhocctlParams { + pub channel: i32, + pub name: [u8; 8usize], + pub bssid: [u8; 6usize], + pub nickname: [u8; 128usize], + } + + #[cfg_attr(feature = "extra_traits", allow(missing_debug_implementations))] + pub union SceNetApctlInfo { + pub name: [u8; 64usize], + pub bssid: [u8; 6usize], + pub ssid: [u8; 32usize], + pub ssid_length: u32, + pub security_type: u32, + pub strength: u8, + pub channel: u8, + pub power_save: u8, + pub ip: [u8; 16usize], + pub sub_net_mask: [u8; 16usize], + pub gateway: [u8; 16usize], + pub primary_dns: [u8; 16usize], + pub secondary_dns: [u8; 16usize], + pub use_proxy: u32, + pub proxy_url: [u8; 128usize], + pub proxy_port: u16, + pub eap_type: u32, + pub start_browser: u32, + pub wifisp: u32, + } +} + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; + +pub const AUDIO_VOLUME_MAX: u32 = 0x8000; +pub const AUDIO_CHANNEL_MAX: u32 = 8; +pub const AUDIO_NEXT_CHANNEL: i32 = -1; +pub const AUDIO_SAMPLE_MIN: u32 = 64; +pub const AUDIO_SAMPLE_MAX: u32 = 65472; + +pub const PSP_CTRL_SELECT: i32 = 0x000001; +pub const PSP_CTRL_START: i32 = 0x000008; +pub const PSP_CTRL_UP: i32 = 0x000010; +pub const PSP_CTRL_RIGHT: i32 = 0x000020; +pub const PSP_CTRL_DOWN: i32 = 0x000040; +pub const PSP_CTRL_LEFT: i32 = 0x000080; +pub const PSP_CTRL_LTRIGGER: i32 = 0x000100; +pub const 
PSP_CTRL_RTRIGGER: i32 = 0x000200; +pub const PSP_CTRL_TRIANGLE: i32 = 0x001000; +pub const PSP_CTRL_CIRCLE: i32 = 0x002000; +pub const PSP_CTRL_CROSS: i32 = 0x004000; +pub const PSP_CTRL_SQUARE: i32 = 0x008000; +pub const PSP_CTRL_HOME: i32 = 0x010000; +pub const PSP_CTRL_HOLD: i32 = 0x020000; +pub const PSP_CTRL_NOTE: i32 = 0x800000; +pub const PSP_CTRL_SCREEN: i32 = 0x400000; +pub const PSP_CTRL_VOLUP: i32 = 0x100000; +pub const PSP_CTRL_VOLDOWN: i32 = 0x200000; +pub const PSP_CTRL_WLAN_UP: i32 = 0x040000; +pub const PSP_CTRL_REMOTE: i32 = 0x080000; +pub const PSP_CTRL_DISC: i32 = 0x1000000; +pub const PSP_CTRL_MS: i32 = 0x2000000; + +pub const USB_CAM_PID: i32 = 0x282; +pub const USB_BUS_DRIVER_NAME: &str = "USBBusDriver"; +pub const USB_CAM_DRIVER_NAME: &str = "USBCamDriver"; +pub const USB_CAM_MIC_DRIVER_NAME: &str = "USBCamMicDriver"; +pub const USB_STOR_DRIVER_NAME: &str = "USBStor_Driver"; + +pub const ACTIVATED: i32 = 0x200; +pub const CONNECTED: i32 = 0x020; +pub const ESTABLISHED: i32 = 0x002; + +pub const USB_CAM_FLIP: i32 = 1; +pub const USB_CAM_MIRROR: i32 = 0x100; + +pub const THREAD_ATTR_VFPU: i32 = 0x00004000; +pub const THREAD_ATTR_USER: i32 = 0x80000000; +pub const THREAD_ATTR_USBWLAN: i32 = 0xa0000000; +pub const THREAD_ATTR_VSH: i32 = 0xc0000000; +pub const THREAD_ATTR_SCRATCH_SRAM: i32 = 0x00008000; +pub const THREAD_ATTR_NO_FILLSTACK: i32 = 0x00100000; +pub const THREAD_ATTR_CLEAR_STACK: i32 = 0x00200000; + +pub const EVENT_WAIT_MULTIPLE: i32 = 0x200; + +pub const EVENT_WAIT_AND: i32 = 0; +pub const EVENT_WAIT_OR: i32 = 1; +pub const EVENT_WAIT_CLEAR: i32 = 0x20; + +pub const POWER_INFO_POWER_SWITCH: i32 = 0x80000000; +pub const POWER_INFO_HOLD_SWITCH: i32 = 0x40000000; +pub const POWER_INFO_STANDBY: i32 = 0x00080000; +pub const POWER_INFO_RESUME_COMPLETE: i32 = 0x00040000; +pub const POWER_INFO_RESUMING: i32 = 0x00020000; +pub const POWER_INFO_SUSPENDING: i32 = 0x00010000; +pub const POWER_INFO_AC_POWER: i32 = 0x00001000; +pub const POWER_INFO_BATTERY_LOW: i32 = 0x00000100; +pub const POWER_INFO_BATTERY_EXIST: i32 = 0x00000080; +pub const POWER_INFO_BATTERY_POWER: i32 = 0x0000007; + +pub const FIO_S_IFLNK: i32 = 0x4000; +pub const FIO_S_IFDIR: i32 = 0x1000; +pub const FIO_S_IFREG: i32 = 0x2000; +pub const FIO_S_ISUID: i32 = 0x0800; +pub const FIO_S_ISGID: i32 = 0x0400; +pub const FIO_S_ISVTX: i32 = 0x0200; +pub const FIO_S_IRUSR: i32 = 0x0100; +pub const FIO_S_IWUSR: i32 = 0x0080; +pub const FIO_S_IXUSR: i32 = 0x0040; +pub const FIO_S_IRGRP: i32 = 0x0020; +pub const FIO_S_IWGRP: i32 = 0x0010; +pub const FIO_S_IXGRP: i32 = 0x0008; +pub const FIO_S_IROTH: i32 = 0x0004; +pub const FIO_S_IWOTH: i32 = 0x0002; +pub const FIO_S_IXOTH: i32 = 0x0001; + +pub const FIO_SO_IFLNK: i32 = 0x0008; +pub const FIO_SO_IFDIR: i32 = 0x0010; +pub const FIO_SO_IFREG: i32 = 0x0020; +pub const FIO_SO_IROTH: i32 = 0x0004; +pub const FIO_SO_IWOTH: i32 = 0x0002; +pub const FIO_SO_IXOTH: i32 = 0x0001; + +pub const PSP_O_RD_ONLY: i32 = 0x0001; +pub const PSP_O_WR_ONLY: i32 = 0x0002; +pub const PSP_O_RD_WR: i32 = 0x0003; +pub const PSP_O_NBLOCK: i32 = 0x0004; +pub const PSP_O_DIR: i32 = 0x0008; +pub const PSP_O_APPEND: i32 = 0x0100; +pub const PSP_O_CREAT: i32 = 0x0200; +pub const PSP_O_TRUNC: i32 = 0x0400; +pub const PSP_O_EXCL: i32 = 0x0800; +pub const PSP_O_NO_WAIT: i32 = 0x8000; + +pub const UMD_NOT_PRESENT: i32 = 0x01; +pub const UMD_PRESENT: i32 = 0x02; +pub const UMD_CHANGED: i32 = 0x04; +pub const UMD_INITING: i32 = 0x08; +pub const UMD_INITED: i32 = 0x10; +pub const UMD_READY: i32 = 
0x20; + +pub const PLAY_PAUSE: i32 = 0x1; +pub const FORWARD: i32 = 0x4; +pub const BACK: i32 = 0x8; +pub const VOL_UP: i32 = 0x10; +pub const VOL_DOWN: i32 = 0x20; +pub const HOLD: i32 = 0x80; + +pub const GU_PI: f32 = 3.141593; + +pub const GU_TEXTURE_8BIT: i32 = 1; +pub const GU_TEXTURE_16BIT: i32 = 2; +pub const GU_TEXTURE_32BITF: i32 = 3; +pub const GU_COLOR_5650: i32 = 4 << 2; +pub const GU_COLOR_5551: i32 = 5 << 2; +pub const GU_COLOR_4444: i32 = 6 << 2; +pub const GU_COLOR_8888: i32 = 7 << 2; +pub const GU_NORMAL_8BIT: i32 = 1 << 5; +pub const GU_NORMAL_16BIT: i32 = 2 << 5; +pub const GU_NORMAL_32BITF: i32 = 3 << 5; +pub const GU_VERTEX_8BIT: i32 = 1 << 7; +pub const GU_VERTEX_16BIT: i32 = 2 << 7; +pub const GU_VERTEX_32BITF: i32 = 3 << 7; +pub const GU_WEIGHT_8BIT: i32 = 1 << 9; +pub const GU_WEIGHT_16BIT: i32 = 2 << 9; +pub const GU_WEIGHT_32BITF: i32 = 3 << 9; +pub const GU_INDEX_8BIT: i32 = 1 << 11; +pub const GU_INDEX_16BIT: i32 = 2 << 11; +pub const GU_WEIGHTS1: i32 = (((1 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS2: i32 = (((2 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS3: i32 = (((3 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS4: i32 = (((4 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS5: i32 = (((5 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS6: i32 = (((6 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS7: i32 = (((7 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS8: i32 = (((8 - 1) & 7) << 14) as i32; +pub const GU_VERTICES1: i32 = (((1 - 1) & 7) << 18) as i32; +pub const GU_VERTICES2: i32 = (((2 - 1) & 7) << 18) as i32; +pub const GU_VERTICES3: i32 = (((3 - 1) & 7) << 18) as i32; +pub const GU_VERTICES4: i32 = (((4 - 1) & 7) << 18) as i32; +pub const GU_VERTICES5: i32 = (((5 - 1) & 7) << 18) as i32; +pub const GU_VERTICES6: i32 = (((6 - 1) & 7) << 18) as i32; +pub const GU_VERTICES7: i32 = (((7 - 1) & 7) << 18) as i32; +pub const GU_VERTICES8: i32 = (((8 - 1) & 7) << 18) as i32; +pub const GU_TRANSFORM_2D: i32 = 1 << 23; +pub const GU_TRANSFORM_3D: i32 = 0; + +pub const GU_COLOR_BUFFER_BIT: i32 = 1; +pub const GU_STENCIL_BUFFER_BIT: i32 = 2; +pub const GU_DEPTH_BUFFER_BIT: i32 = 4; +pub const GU_FAST_CLEAR_BIT: i32 = 16; + +pub const GU_AMBIENT: i32 = 1; +pub const GU_DIFFUSE: i32 = 2; +pub const GU_SPECULAR: i32 = 4; +pub const GU_UNKNOWN_LIGHT_COMPONENT: i32 = 8; + +pub const SYSTEM_REGISTRY: [u8; 7] = *b"/system"; +pub const REG_KEYNAME_SIZE: u32 = 27; + +pub const UTILITY_MSGDIALOG_ERROR: i32 = 0; +pub const UTILITY_MSGDIALOG_TEXT: i32 = 1; +pub const UTILITY_MSGDIALOG_YES_NO_BUTTONS: i32 = 0x10; +pub const UTILITY_MSGDIALOG_DEFAULT_NO: i32 = 0x100; + +pub const UTILITY_HTMLVIEWER_OPEN_SCE_START_PAGE: i32 = 0x000001; +pub const UTILITY_HTMLVIEWER_DISABLE_STARTUP_LIMITS: i32 = 0x000002; +pub const UTILITY_HTMLVIEWER_DISABLE_EXIT_DIALOG: i32 = 0x000004; +pub const UTILITY_HTMLVIEWER_DISABLE_CURSOR: i32 = 0x000008; +pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_COMPLETE_DIALOG: i32 = 0x000010; +pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_START_DIALOG: i32 = 0x000020; +pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_DESTINATION_DIALOG: i32 = + 0x000040; +pub const UTILITY_HTMLVIEWER_LOCK_DOWNLOAD_DESTINATION_DIALOG: i32 = 0x000080; +pub const UTILITY_HTMLVIEWER_DISABLE_TAB_DISPLAY: i32 = 0x000100; +pub const UTILITY_HTMLVIEWER_ENABLE_ANALOG_HOLD: i32 = 0x000200; +pub const UTILITY_HTMLVIEWER_ENABLE_FLASH: i32 = 0x000400; +pub const UTILITY_HTMLVIEWER_DISABLE_LRTRIGGER: i32 = 0x000800; + +extern "C" { + pub fn sceAudioChReserve( + channel: i32, + sample_count: i32, + 
format: AudioFormat, + ) -> i32; + pub fn sceAudioChRelease(channel: i32) -> i32; + pub fn sceAudioOutput(channel: i32, vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioOutputBlocking( + channel: i32, + vol: i32, + buf: *mut c_void, + ) -> i32; + pub fn sceAudioOutputPanned( + channel: i32, + left_vol: i32, + right_vol: i32, + buf: *mut c_void, + ) -> i32; + pub fn sceAudioOutputPannedBlocking( + channel: i32, + left_vol: i32, + right_vol: i32, + buf: *mut c_void, + ) -> i32; + pub fn sceAudioGetChannelRestLen(channel: i32) -> i32; + pub fn sceAudioGetChannelRestLength(channel: i32) -> i32; + pub fn sceAudioSetChannelDataLen(channel: i32, sample_count: i32) -> i32; + pub fn sceAudioChangeChannelConfig( + channel: i32, + format: AudioFormat, + ) -> i32; + pub fn sceAudioChangeChannelVolume( + channel: i32, + left_vol: i32, + right_vol: i32, + ) -> i32; + pub fn sceAudioOutput2Reserve(sample_count: i32) -> i32; + pub fn sceAudioOutput2Release() -> i32; + pub fn sceAudioOutput2ChangeLength(sample_count: i32) -> i32; + pub fn sceAudioOutput2OutputBlocking(vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioOutput2GetRestSample() -> i32; + pub fn sceAudioSRCChReserve( + sample_count: i32, + freq: AudioOutputFrequency, + channels: i32, + ) -> i32; + pub fn sceAudioSRCChRelease() -> i32; + pub fn sceAudioSRCOutputBlocking(vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioInputInit(unknown1: i32, gain: i32, unknown2: i32) -> i32; + pub fn sceAudioInputInitEx(params: *mut AudioInputParams) -> i32; + pub fn sceAudioInputBlocking( + sample_count: i32, + freq: AudioInputFrequency, + buf: *mut c_void, + ); + pub fn sceAudioInput( + sample_count: i32, + freq: AudioInputFrequency, + buf: *mut c_void, + ); + pub fn sceAudioGetInputLength() -> i32; + pub fn sceAudioWaitInputEnd() -> i32; + pub fn sceAudioPollInputEnd() -> i32; + + pub fn sceAtracGetAtracID(ui_codec_type: u32) -> i32; + pub fn sceAtracSetDataAndGetID(buf: *mut c_void, bufsize: usize) -> i32; + pub fn sceAtracDecodeData( + atrac_id: i32, + out_samples: *mut u16, + out_n: *mut i32, + out_end: *mut i32, + out_remain_frame: *mut i32, + ) -> i32; + pub fn sceAtracGetRemainFrame( + atrac_id: i32, + out_remain_frame: *mut i32, + ) -> i32; + pub fn sceAtracGetStreamDataInfo( + atrac_id: i32, + write_pointer: *mut *mut u8, + available_bytes: *mut u32, + read_offset: *mut u32, + ) -> i32; + pub fn sceAtracAddStreamData(atrac_id: i32, bytes_to_add: u32) -> i32; + pub fn sceAtracGetBitrate(atrac_id: i32, out_bitrate: *mut i32) -> i32; + pub fn sceAtracSetLoopNum(atrac_id: i32, nloops: i32) -> i32; + pub fn sceAtracReleaseAtracID(atrac_id: i32) -> i32; + pub fn sceAtracGetNextSample(atrac_id: i32, out_n: *mut i32) -> i32; + pub fn sceAtracGetMaxSample(atrac_id: i32, out_max: *mut i32) -> i32; + pub fn sceAtracGetBufferInfoForReseting( + atrac_id: i32, + ui_sample: u32, + pbuffer_info: *mut Atrac3BufferInfo, + ) -> i32; + pub fn sceAtracGetChannel(atrac_id: i32, pui_channel: *mut u32) -> i32; + pub fn sceAtracGetInternalErrorInfo( + atrac_id: i32, + pi_result: *mut i32, + ) -> i32; + pub fn sceAtracGetLoopStatus( + atrac_id: i32, + pi_loop_num: *mut i32, + pui_loop_status: *mut u32, + ) -> i32; + pub fn sceAtracGetNextDecodePosition( + atrac_id: i32, + pui_sample_position: *mut u32, + ) -> i32; + pub fn sceAtracGetSecondBufferInfo( + atrac_id: i32, + pui_position: *mut u32, + pui_data_byte: *mut u32, + ) -> i32; + pub fn sceAtracGetSoundSample( + atrac_id: i32, + pi_end_sample: *mut i32, + pi_loop_start_sample: *mut i32, + pi_loop_end_sample: 
*mut i32, + ) -> i32; + pub fn sceAtracResetPlayPosition( + atrac_id: i32, + ui_sample: u32, + ui_write_byte_first_buf: u32, + ui_write_byte_second_buf: u32, + ) -> i32; + pub fn sceAtracSetData( + atrac_id: i32, + puc_buffer_addr: *mut u8, + ui_buffer_byte: u32, + ) -> i32; + pub fn sceAtracSetHalfwayBuffer( + atrac_id: i32, + puc_buffer_addr: *mut u8, + ui_read_byte: u32, + ui_buffer_byte: u32, + ) -> i32; + pub fn sceAtracSetHalfwayBufferAndGetID( + puc_buffer_addr: *mut u8, + ui_read_byte: u32, + ui_buffer_byte: u32, + ) -> i32; + pub fn sceAtracSetSecondBuffer( + atrac_id: i32, + puc_second_buffer_addr: *mut u8, + ui_second_buffer_byte: u32, + ) -> i32; + + pub fn sceCtrlSetSamplingCycle(cycle: i32) -> i32; + pub fn sceCtrlGetSamplingCycle(pcycle: *mut i32) -> i32; + pub fn sceCtrlSetSamplingMode(mode: CtrlMode) -> i32; + pub fn sceCtrlGetSamplingMode(pmode: *mut i32) -> i32; + pub fn sceCtrlPeekBufferPositive( + pad_data: *mut SceCtrlData, + count: i32, + ) -> i32; + pub fn sceCtrlPeekBufferNegative( + pad_data: *mut SceCtrlData, + count: i32, + ) -> i32; + pub fn sceCtrlReadBufferPositive( + pad_data: *mut SceCtrlData, + count: i32, + ) -> i32; + pub fn sceCtrlReadBufferNegative( + pad_data: *mut SceCtrlData, + count: i32, + ) -> i32; + pub fn sceCtrlPeekLatch(latch_data: *mut SceCtrlLatch) -> i32; + pub fn sceCtrlReadLatch(latch_data: *mut SceCtrlLatch) -> i32; + pub fn sceCtrlSetIdleCancelThreshold(idlereset: i32, idleback: i32) + -> i32; + pub fn sceCtrlGetIdleCancelThreshold( + idlereset: *mut i32, + idleback: *mut i32, + ) -> i32; + + pub fn sceDisplaySetMode( + mode: DisplayMode, + width: usize, + height: usize, + ) -> u32; + pub fn sceDisplayGetMode( + pmode: *mut i32, + pwidth: *mut i32, + pheight: *mut i32, + ) -> i32; + pub fn sceDisplaySetFrameBuf( + top_addr: *const u8, + buffer_width: usize, + pixel_format: DisplayPixelFormat, + sync: DisplaySetBufSync, + ) -> u32; + pub fn sceDisplayGetFrameBuf( + top_addr: *mut *mut c_void, + buffer_width: *mut usize, + pixel_format: *mut DisplayPixelFormat, + sync: DisplaySetBufSync, + ) -> i32; + pub fn sceDisplayGetVcount() -> u32; + pub fn sceDisplayWaitVblank() -> i32; + pub fn sceDisplayWaitVblankCB() -> i32; + pub fn sceDisplayWaitVblankStart() -> i32; + pub fn sceDisplayWaitVblankStartCB() -> i32; + pub fn sceDisplayGetAccumulatedHcount() -> i32; + pub fn sceDisplayGetCurrentHcount() -> i32; + pub fn sceDisplayGetFramePerSec() -> f32; + pub fn sceDisplayIsForeground() -> i32; + pub fn sceDisplayIsVblank() -> i32; + + pub fn sceGeEdramGetSize() -> u32; + pub fn sceGeEdramGetAddr() -> *mut u8; + pub fn sceGeEdramSetAddrTranslation(width: i32) -> i32; + pub fn sceGeGetCmd(cmd: i32) -> u32; + pub fn sceGeGetMtx(type_: GeMatrixType, matrix: *mut c_void) -> i32; + pub fn sceGeGetStack(stack_id: i32, stack: *mut GeStack) -> i32; + pub fn sceGeSaveContext(context: *mut GeContext) -> i32; + pub fn sceGeRestoreContext(context: *const GeContext) -> i32; + pub fn sceGeListEnQueue( + list: *const c_void, + stall: *mut c_void, + cbid: i32, + arg: *mut GeListArgs, + ) -> i32; + pub fn sceGeListEnQueueHead( + list: *const c_void, + stall: *mut c_void, + cbid: i32, + arg: *mut GeListArgs, + ) -> i32; + pub fn sceGeListDeQueue(qid: i32) -> i32; + pub fn sceGeListUpdateStallAddr(qid: i32, stall: *mut c_void) -> i32; + pub fn sceGeListSync(qid: i32, sync_type: i32) -> GeListState; + pub fn sceGeDrawSync(sync_type: i32) -> GeListState; + pub fn sceGeBreak(mode: i32, p_param: *mut GeBreakParam) -> i32; + pub fn sceGeContinue() -> i32; + pub fn 
sceGeSetCallback(cb: *mut GeCallbackData) -> i32; + pub fn sceGeUnsetCallback(cbid: i32) -> i32; + + pub fn sceKernelExitGame(); + pub fn sceKernelRegisterExitCallback(id: SceUid) -> i32; + pub fn sceKernelLoadExec( + file: *const u8, + param: *mut SceKernelLoadExecParam, + ) -> i32; + + pub fn sceKernelAllocPartitionMemory( + partition: SceSysMemPartitionId, + name: *const u8, + type_: SceSysMemBlockTypes, + size: u32, + addr: *mut c_void, + ) -> SceUid; + pub fn sceKernelGetBlockHeadAddr(blockid: SceUid) -> *mut c_void; + pub fn sceKernelFreePartitionMemory(blockid: SceUid) -> i32; + pub fn sceKernelTotalFreeMemSize() -> usize; + pub fn sceKernelMaxFreeMemSize() -> usize; + pub fn sceKernelDevkitVersion() -> u32; + pub fn sceKernelSetCompiledSdkVersion(version: u32) -> i32; + pub fn sceKernelGetCompiledSdkVersion() -> u32; + + pub fn sceKernelLibcTime(t: *mut i32) -> i32; + pub fn sceKernelLibcClock() -> u32; + pub fn sceKernelLibcGettimeofday( + tp: *mut timeval, + tzp: *mut timezone, + ) -> i32; + pub fn sceKernelDcacheWritebackAll(); + pub fn sceKernelDcacheWritebackInvalidateAll(); + pub fn sceKernelDcacheWritebackRange(p: *const c_void, size: u32); + pub fn sceKernelDcacheWritebackInvalidateRange( + p: *const c_void, + size: u32, + ); + pub fn sceKernelDcacheInvalidateRange(p: *const c_void, size: u32); + pub fn sceKernelIcacheInvalidateAll(); + pub fn sceKernelIcacheInvalidateRange(p: *const c_void, size: u32); + pub fn sceKernelUtilsMt19937Init( + ctx: *mut SceKernelUtilsMt19937Context, + seed: u32, + ) -> i32; + pub fn sceKernelUtilsMt19937UInt( + ctx: *mut SceKernelUtilsMt19937Context, + ) -> u32; + pub fn sceKernelUtilsMd5Digest( + data: *mut u8, + size: u32, + digest: *mut u8, + ) -> i32; + pub fn sceKernelUtilsMd5BlockInit( + ctx: *mut SceKernelUtilsMd5Context, + ) -> i32; + pub fn sceKernelUtilsMd5BlockUpdate( + ctx: *mut SceKernelUtilsMd5Context, + data: *mut u8, + size: u32, + ) -> i32; + pub fn sceKernelUtilsMd5BlockResult( + ctx: *mut SceKernelUtilsMd5Context, + digest: *mut u8, + ) -> i32; + pub fn sceKernelUtilsSha1Digest( + data: *mut u8, + size: u32, + digest: *mut u8, + ) -> i32; + pub fn sceKernelUtilsSha1BlockInit( + ctx: *mut SceKernelUtilsSha1Context, + ) -> i32; + pub fn sceKernelUtilsSha1BlockUpdate( + ctx: *mut SceKernelUtilsSha1Context, + data: *mut u8, + size: u32, + ) -> i32; + pub fn sceKernelUtilsSha1BlockResult( + ctx: *mut SceKernelUtilsSha1Context, + digest: *mut u8, + ) -> i32; + + pub fn sceKernelRegisterSubIntrHandler( + int_no: i32, + no: i32, + handler: *mut c_void, + arg: *mut c_void, + ) -> i32; + pub fn sceKernelReleaseSubIntrHandler(int_no: i32, no: i32) -> i32; + pub fn sceKernelEnableSubIntr(int_no: i32, no: i32) -> i32; + pub fn sceKernelDisableSubIntr(int_no: i32, no: i32) -> i32; + pub fn QueryIntrHandlerInfo( + intr_code: SceUid, + sub_intr_code: SceUid, + data: *mut IntrHandlerOptionParam, + ) -> i32; + + pub fn sceKernelCpuSuspendIntr() -> u32; + pub fn sceKernelCpuResumeIntr(flags: u32); + pub fn sceKernelCpuResumeIntrWithSync(flags: u32); + pub fn sceKernelIsCpuIntrSuspended(flags: u32) -> i32; + pub fn sceKernelIsCpuIntrEnable() -> i32; + + pub fn sceKernelLoadModule( + path: *const u8, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelLoadModuleMs( + path: *const u8, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelLoadModuleByID( + fid: SceUid, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelLoadModuleBufferUsbWlan( + buf_size: usize, 
+ buf: *mut c_void, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelStartModule( + mod_id: SceUid, + arg_size: usize, + argp: *mut c_void, + status: *mut i32, + option: *mut SceKernelSMOption, + ) -> i32; + pub fn sceKernelStopModule( + mod_id: SceUid, + arg_size: usize, + argp: *mut c_void, + status: *mut i32, + option: *mut SceKernelSMOption, + ) -> i32; + pub fn sceKernelUnloadModule(mod_id: SceUid) -> i32; + pub fn sceKernelSelfStopUnloadModule( + unknown: i32, + arg_size: usize, + argp: *mut c_void, + ) -> i32; + pub fn sceKernelStopUnloadSelfModule( + arg_size: usize, + argp: *mut c_void, + status: *mut i32, + option: *mut SceKernelSMOption, + ) -> i32; + pub fn sceKernelQueryModuleInfo( + mod_id: SceUid, + info: *mut SceKernelModuleInfo, + ) -> i32; + pub fn sceKernelGetModuleIdList( + read_buf: *mut SceUid, + read_buf_size: i32, + id_count: *mut i32, + ) -> i32; + + pub fn sceKernelVolatileMemLock( + unk: i32, + ptr: *mut *mut c_void, + size: *mut i32, + ) -> i32; + pub fn sceKernelVolatileMemTryLock( + unk: i32, + ptr: *mut *mut c_void, + size: *mut i32, + ) -> i32; + pub fn sceKernelVolatileMemUnlock(unk: i32) -> i32; + + pub fn sceKernelStdin() -> SceUid; + pub fn sceKernelStdout() -> SceUid; + pub fn sceKernelStderr() -> SceUid; + + pub fn sceKernelGetThreadmanIdType(uid: SceUid) -> SceKernelIdListType; + pub fn sceKernelCreateThread( + name: *const u8, + entry: SceKernelThreadEntry, + init_priority: i32, + stack_size: i32, + attr: i32, + option: *mut SceKernelThreadOptParam, + ) -> SceUid; + pub fn sceKernelDeleteThread(thid: SceUid) -> i32; + pub fn sceKernelStartThread( + id: SceUid, + arg_len: usize, + arg_p: *mut c_void, + ) -> i32; + pub fn sceKernelExitThread(status: i32) -> i32; + pub fn sceKernelExitDeleteThread(status: i32) -> i32; + pub fn sceKernelTerminateThread(thid: SceUid) -> i32; + pub fn sceKernelTerminateDeleteThread(thid: SceUid) -> i32; + pub fn sceKernelSuspendDispatchThread() -> i32; + pub fn sceKernelResumeDispatchThread(state: i32) -> i32; + pub fn sceKernelSleepThread() -> i32; + pub fn sceKernelSleepThreadCB() -> i32; + pub fn sceKernelWakeupThread(thid: SceUid) -> i32; + pub fn sceKernelCancelWakeupThread(thid: SceUid) -> i32; + pub fn sceKernelSuspendThread(thid: SceUid) -> i32; + pub fn sceKernelResumeThread(thid: SceUid) -> i32; + pub fn sceKernelWaitThreadEnd(thid: SceUid, timeout: *mut u32) -> i32; + pub fn sceKernelWaitThreadEndCB(thid: SceUid, timeout: *mut u32) -> i32; + pub fn sceKernelDelayThread(delay: u32) -> i32; + pub fn sceKernelDelayThreadCB(delay: u32) -> i32; + pub fn sceKernelDelaySysClockThread(delay: *mut SceKernelSysClock) -> i32; + pub fn sceKernelDelaySysClockThreadCB( + delay: *mut SceKernelSysClock, + ) -> i32; + pub fn sceKernelChangeCurrentThreadAttr(unknown: i32, attr: i32) -> i32; + pub fn sceKernelChangeThreadPriority(thid: SceUid, priority: i32) -> i32; + pub fn sceKernelRotateThreadReadyQueue(priority: i32) -> i32; + pub fn sceKernelReleaseWaitThread(thid: SceUid) -> i32; + pub fn sceKernelGetThreadId() -> i32; + pub fn sceKernelGetThreadCurrentPriority() -> i32; + pub fn sceKernelGetThreadExitStatus(thid: SceUid) -> i32; + pub fn sceKernelCheckThreadStack() -> i32; + pub fn sceKernelGetThreadStackFreeSize(thid: SceUid) -> i32; + pub fn sceKernelReferThreadStatus( + thid: SceUid, + info: *mut SceKernelThreadInfo, + ) -> i32; + pub fn sceKernelReferThreadRunStatus( + thid: SceUid, + status: *mut SceKernelThreadRunStatus, + ) -> i32; + pub fn sceKernelCreateSema( + name: *const u8, + 
attr: u32, + init_val: i32, + max_val: i32, + option: *mut SceKernelSemaOptParam, + ) -> SceUid; + pub fn sceKernelDeleteSema(sema_id: SceUid) -> i32; + pub fn sceKernelSignalSema(sema_id: SceUid, signal: i32) -> i32; + pub fn sceKernelWaitSema( + sema_id: SceUid, + signal: i32, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelWaitSemaCB( + sema_id: SceUid, + signal: i32, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelPollSema(sema_id: SceUid, signal: i32) -> i32; + pub fn sceKernelReferSemaStatus( + sema_id: SceUid, + info: *mut SceKernelSemaInfo, + ) -> i32; + pub fn sceKernelCreateEventFlag( + name: *const u8, + attr: i32, + bits: i32, + opt: *mut SceKernelEventFlagOptParam, + ) -> SceUid; + pub fn sceKernelSetEventFlag(ev_id: SceUid, bits: u32) -> i32; + pub fn sceKernelClearEventFlag(ev_id: SceUid, bits: u32) -> i32; + pub fn sceKernelPollEventFlag( + ev_id: SceUid, + bits: u32, + wait: i32, + out_bits: *mut u32, + ) -> i32; + pub fn sceKernelWaitEventFlag( + ev_id: SceUid, + bits: u32, + wait: i32, + out_bits: *mut u32, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelWaitEventFlagCB( + ev_id: SceUid, + bits: u32, + wait: i32, + out_bits: *mut u32, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelDeleteEventFlag(ev_id: SceUid) -> i32; + pub fn sceKernelReferEventFlagStatus( + event: SceUid, + status: *mut SceKernelEventFlagInfo, + ) -> i32; + pub fn sceKernelCreateMbx( + name: *const u8, + attr: u32, + option: *mut SceKernelMbxOptParam, + ) -> SceUid; + pub fn sceKernelDeleteMbx(mbx_id: SceUid) -> i32; + pub fn sceKernelSendMbx(mbx_id: SceUid, message: *mut c_void) -> i32; + pub fn sceKernelReceiveMbx( + mbx_id: SceUid, + message: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelReceiveMbxCB( + mbx_id: SceUid, + message: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelPollMbx(mbx_id: SceUid, pmessage: *mut *mut c_void) + -> i32; + pub fn sceKernelCancelReceiveMbx(mbx_id: SceUid, num: *mut i32) -> i32; + pub fn sceKernelReferMbxStatus( + mbx_id: SceUid, + info: *mut SceKernelMbxInfo, + ) -> i32; + pub fn sceKernelSetAlarm( + clock: u32, + handler: SceKernelAlarmHandler, + common: *mut c_void, + ) -> SceUid; + pub fn sceKernelSetSysClockAlarm( + clock: *mut SceKernelSysClock, + handler: *mut SceKernelAlarmHandler, + common: *mut c_void, + ) -> SceUid; + pub fn sceKernelCancelAlarm(alarm_id: SceUid) -> i32; + pub fn sceKernelReferAlarmStatus( + alarm_id: SceUid, + info: *mut SceKernelAlarmInfo, + ) -> i32; + pub fn sceKernelCreateCallback( + name: *const u8, + func: SceKernelCallbackFunction, + arg: *mut c_void, + ) -> SceUid; + pub fn sceKernelReferCallbackStatus( + cb: SceUid, + status: *mut SceKernelCallbackInfo, + ) -> i32; + pub fn sceKernelDeleteCallback(cb: SceUid) -> i32; + pub fn sceKernelNotifyCallback(cb: SceUid, arg2: i32) -> i32; + pub fn sceKernelCancelCallback(cb: SceUid) -> i32; + pub fn sceKernelGetCallbackCount(cb: SceUid) -> i32; + pub fn sceKernelCheckCallback() -> i32; + pub fn sceKernelGetThreadmanIdList( + type_: SceKernelIdListType, + read_buf: *mut SceUid, + read_buf_size: i32, + id_count: *mut i32, + ) -> i32; + pub fn sceKernelReferSystemStatus( + status: *mut SceKernelSystemStatus, + ) -> i32; + pub fn sceKernelCreateMsgPipe( + name: *const u8, + part: i32, + attr: i32, + unk1: *mut c_void, + opt: *mut c_void, + ) -> SceUid; + pub fn sceKernelDeleteMsgPipe(uid: SceUid) -> i32; + pub fn sceKernelSendMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut 
u32, + ) -> i32; + pub fn sceKernelSendMsgPipeCB( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTrySendMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + ) -> i32; + pub fn sceKernelReceiveMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelReceiveMsgPipeCB( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTryReceiveMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + ) -> i32; + pub fn sceKernelCancelMsgPipe( + uid: SceUid, + send: *mut i32, + recv: *mut i32, + ) -> i32; + pub fn sceKernelReferMsgPipeStatus( + uid: SceUid, + info: *mut SceKernelMppInfo, + ) -> i32; + pub fn sceKernelCreateVpl( + name: *const u8, + part: i32, + attr: i32, + size: u32, + opt: *mut SceKernelVplOptParam, + ) -> SceUid; + pub fn sceKernelDeleteVpl(uid: SceUid) -> i32; + pub fn sceKernelAllocateVpl( + uid: SceUid, + size: u32, + data: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelAllocateVplCB( + uid: SceUid, + size: u32, + data: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTryAllocateVpl( + uid: SceUid, + size: u32, + data: *mut *mut c_void, + ) -> i32; + pub fn sceKernelFreeVpl(uid: SceUid, data: *mut c_void) -> i32; + pub fn sceKernelCancelVpl(uid: SceUid, num: *mut i32) -> i32; + pub fn sceKernelReferVplStatus( + uid: SceUid, + info: *mut SceKernelVplInfo, + ) -> i32; + pub fn sceKernelCreateFpl( + name: *const u8, + part: i32, + attr: i32, + size: u32, + blocks: u32, + opt: *mut SceKernelFplOptParam, + ) -> i32; + pub fn sceKernelDeleteFpl(uid: SceUid) -> i32; + pub fn sceKernelAllocateFpl( + uid: SceUid, + data: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelAllocateFplCB( + uid: SceUid, + data: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTryAllocateFpl(uid: SceUid, data: *mut *mut c_void) + -> i32; + pub fn sceKernelFreeFpl(uid: SceUid, data: *mut c_void) -> i32; + pub fn sceKernelCancelFpl(uid: SceUid, pnum: *mut i32) -> i32; + pub fn sceKernelReferFplStatus( + uid: SceUid, + info: *mut SceKernelFplInfo, + ) -> i32; + pub fn sceKernelUSec2SysClock( + usec: u32, + clock: *mut SceKernelSysClock, + ) -> i32; + pub fn sceKernelUSec2SysClockWide(usec: u32) -> i64; + pub fn sceKernelSysClock2USec( + clock: *mut SceKernelSysClock, + low: *mut u32, + high: *mut u32, + ) -> i32; + pub fn sceKernelSysClock2USecWide( + clock: i64, + low: *mut u32, + high: *mut u32, + ) -> i32; + pub fn sceKernelGetSystemTime(time: *mut SceKernelSysClock) -> i32; + pub fn sceKernelGetSystemTimeWide() -> i64; + pub fn sceKernelGetSystemTimeLow() -> u32; + pub fn sceKernelCreateVTimer( + name: *const u8, + opt: *mut SceKernelVTimerOptParam, + ) -> SceUid; + pub fn sceKernelDeleteVTimer(uid: SceUid) -> i32; + pub fn sceKernelGetVTimerBase( + uid: SceUid, + base: *mut SceKernelSysClock, + ) -> i32; + pub fn sceKernelGetVTimerBaseWide(uid: SceUid) -> i64; + pub fn sceKernelGetVTimerTime( + uid: SceUid, + time: *mut SceKernelSysClock, + ) -> i32; + pub fn sceKernelGetVTimerTimeWide(uid: SceUid) -> i64; + pub fn sceKernelSetVTimerTime( + uid: SceUid, + time: *mut SceKernelSysClock, + ) -> i32; + pub fn sceKernelSetVTimerTimeWide(uid: SceUid, time: i64) -> i64; + pub fn 
sceKernelStartVTimer(uid: SceUid) -> i32; + pub fn sceKernelStopVTimer(uid: SceUid) -> i32; + pub fn sceKernelSetVTimerHandler( + uid: SceUid, + time: *mut SceKernelSysClock, + handler: SceKernelVTimerHandler, + common: *mut c_void, + ) -> i32; + pub fn sceKernelSetVTimerHandlerWide( + uid: SceUid, + time: i64, + handler: SceKernelVTimerHandlerWide, + common: *mut c_void, + ) -> i32; + pub fn sceKernelCancelVTimerHandler(uid: SceUid) -> i32; + pub fn sceKernelReferVTimerStatus( + uid: SceUid, + info: *mut SceKernelVTimerInfo, + ) -> i32; + pub fn sceKernelRegisterThreadEventHandler( + name: *const u8, + thread_id: SceUid, + mask: i32, + handler: SceKernelThreadEventHandler, + common: *mut c_void, + ) -> SceUid; + pub fn sceKernelReleaseThreadEventHandler(uid: SceUid) -> i32; + pub fn sceKernelReferThreadEventHandlerStatus( + uid: SceUid, + info: *mut SceKernelThreadEventHandlerInfo, + ) -> i32; + pub fn sceKernelReferThreadProfiler() -> *mut DebugProfilerRegs; + pub fn sceKernelReferGlobalProfiler() -> *mut DebugProfilerRegs; + + pub fn sceUsbStart( + driver_name: *const u8, + size: i32, + args: *mut c_void, + ) -> i32; + pub fn sceUsbStop( + driver_name: *const u8, + size: i32, + args: *mut c_void, + ) -> i32; + pub fn sceUsbActivate(pid: u32) -> i32; + pub fn sceUsbDeactivate(pid: u32) -> i32; + pub fn sceUsbGetState() -> i32; + pub fn sceUsbGetDrvState(driver_name: *const u8) -> i32; +} + +extern "C" { + pub fn sceUsbCamSetupStill(param: *mut UsbCamSetupStillParam) -> i32; + pub fn sceUsbCamSetupStillEx(param: *mut UsbCamSetupStillExParam) -> i32; + pub fn sceUsbCamStillInputBlocking(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamStillInput(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamStillWaitInputEnd() -> i32; + pub fn sceUsbCamStillPollInputEnd() -> i32; + pub fn sceUsbCamStillCancelInput() -> i32; + pub fn sceUsbCamStillGetInputLength() -> i32; + pub fn sceUsbCamSetupVideo( + param: *mut UsbCamSetupVideoParam, + work_area: *mut c_void, + work_area_size: i32, + ) -> i32; + pub fn sceUsbCamSetupVideoEx( + param: *mut UsbCamSetupVideoExParam, + work_area: *mut c_void, + work_area_size: i32, + ) -> i32; + pub fn sceUsbCamStartVideo() -> i32; + pub fn sceUsbCamStopVideo() -> i32; + pub fn sceUsbCamReadVideoFrameBlocking(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamReadVideoFrame(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamWaitReadVideoFrameEnd() -> i32; + pub fn sceUsbCamPollReadVideoFrameEnd() -> i32; + pub fn sceUsbCamGetReadVideoFrameSize() -> i32; + pub fn sceUsbCamSetSaturation(saturation: i32) -> i32; + pub fn sceUsbCamSetBrightness(brightness: i32) -> i32; + pub fn sceUsbCamSetContrast(contrast: i32) -> i32; + pub fn sceUsbCamSetSharpness(sharpness: i32) -> i32; + pub fn sceUsbCamSetImageEffectMode(effect_mode: UsbCamEffectMode) -> i32; + pub fn sceUsbCamSetEvLevel(exposure_level: UsbCamEvLevel) -> i32; + pub fn sceUsbCamSetReverseMode(reverse_flags: i32) -> i32; + pub fn sceUsbCamSetZoom(zoom: i32) -> i32; + pub fn sceUsbCamGetSaturation(saturation: *mut i32) -> i32; + pub fn sceUsbCamGetBrightness(brightness: *mut i32) -> i32; + pub fn sceUsbCamGetContrast(contrast: *mut i32) -> i32; + pub fn sceUsbCamGetSharpness(sharpness: *mut i32) -> i32; + pub fn sceUsbCamGetImageEffectMode( + effect_mode: *mut UsbCamEffectMode, + ) -> i32; + pub fn sceUsbCamGetEvLevel(exposure_level: *mut UsbCamEvLevel) -> i32; + pub fn sceUsbCamGetReverseMode(reverse_flags: *mut i32) -> i32; + pub fn sceUsbCamGetZoom(zoom: *mut i32) -> i32; + pub fn 
sceUsbCamAutoImageReverseSW(on: i32) -> i32; + pub fn sceUsbCamGetAutoImageReverseState() -> i32; + pub fn sceUsbCamGetLensDirection() -> i32; + + pub fn sceUsbstorBootRegisterNotify(event_flag: SceUid) -> i32; + pub fn sceUsbstorBootUnregisterNotify(event_flag: u32) -> i32; + pub fn sceUsbstorBootSetCapacity(size: u32) -> i32; + + pub fn scePowerRegisterCallback(slot: i32, cbid: SceUid) -> i32; + pub fn scePowerUnregisterCallback(slot: i32) -> i32; + pub fn scePowerIsPowerOnline() -> i32; + pub fn scePowerIsBatteryExist() -> i32; + pub fn scePowerIsBatteryCharging() -> i32; + pub fn scePowerGetBatteryChargingStatus() -> i32; + pub fn scePowerIsLowBattery() -> i32; + pub fn scePowerGetBatteryLifePercent() -> i32; + pub fn scePowerGetBatteryLifeTime() -> i32; + pub fn scePowerGetBatteryTemp() -> i32; + pub fn scePowerGetBatteryElec() -> i32; + pub fn scePowerGetBatteryVolt() -> i32; + pub fn scePowerSetCpuClockFrequency(cpufreq: i32) -> i32; + pub fn scePowerSetBusClockFrequency(busfreq: i32) -> i32; + pub fn scePowerGetCpuClockFrequency() -> i32; + pub fn scePowerGetCpuClockFrequencyInt() -> i32; + pub fn scePowerGetCpuClockFrequencyFloat() -> f32; + pub fn scePowerGetBusClockFrequency() -> i32; + pub fn scePowerGetBusClockFrequencyInt() -> i32; + pub fn scePowerGetBusClockFrequencyFloat() -> f32; + pub fn scePowerSetClockFrequency( + pllfreq: i32, + cpufreq: i32, + busfreq: i32, + ) -> i32; + pub fn scePowerLock(unknown: i32) -> i32; + pub fn scePowerUnlock(unknown: i32) -> i32; + pub fn scePowerTick(t: PowerTick) -> i32; + pub fn scePowerGetIdleTimer() -> i32; + pub fn scePowerIdleTimerEnable(unknown: i32) -> i32; + pub fn scePowerIdleTimerDisable(unknown: i32) -> i32; + pub fn scePowerRequestStandby() -> i32; + pub fn scePowerRequestSuspend() -> i32; + + pub fn sceWlanDevIsPowerOn() -> i32; + pub fn sceWlanGetSwitchState() -> i32; + pub fn sceWlanGetEtherAddr(ether_addr: *mut u8) -> i32; + + pub fn sceWlanDevAttach() -> i32; + pub fn sceWlanDevDetach() -> i32; + + pub fn sceRtcGetTickResolution() -> u32; + pub fn sceRtcGetCurrentTick(tick: *mut u64) -> i32; + pub fn sceRtcGetCurrentClock(tm: *mut ScePspDateTime, tz: i32) -> i32; + pub fn sceRtcGetCurrentClockLocalTime(tm: *mut ScePspDateTime) -> i32; + pub fn sceRtcConvertUtcToLocalTime( + tick_utc: *const u64, + tick_local: *mut u64, + ) -> i32; + pub fn sceRtcConvertLocalTimeToUTC( + tick_local: *const u64, + tick_utc: *mut u64, + ) -> i32; + pub fn sceRtcIsLeapYear(year: i32) -> i32; + pub fn sceRtcGetDaysInMonth(year: i32, month: i32) -> i32; + pub fn sceRtcGetDayOfWeek(year: i32, month: i32, day: i32) -> i32; + pub fn sceRtcCheckValid(date: *const ScePspDateTime) -> i32; + pub fn sceRtcSetTick(date: *mut ScePspDateTime, tick: *const u64) -> i32; + pub fn sceRtcGetTick(date: *const ScePspDateTime, tick: *mut u64) -> i32; + pub fn sceRtcCompareTick(tick1: *const u64, tick2: *const u64) -> i32; + pub fn sceRtcTickAddTicks( + dest_tick: *mut u64, + src_tick: *const u64, + num_ticks: u64, + ) -> i32; + pub fn sceRtcTickAddMicroseconds( + dest_tick: *mut u64, + src_tick: *const u64, + num_ms: u64, + ) -> i32; + pub fn sceRtcTickAddSeconds( + dest_tick: *mut u64, + src_tick: *const u64, + num_seconds: u64, + ) -> i32; + pub fn sceRtcTickAddMinutes( + dest_tick: *mut u64, + src_tick: *const u64, + num_minutes: u64, + ) -> i32; + pub fn sceRtcTickAddHours( + dest_tick: *mut u64, + src_tick: *const u64, + num_hours: u64, + ) -> i32; + pub fn sceRtcTickAddDays( + dest_tick: *mut u64, + src_tick: *const u64, + num_days: u64, + ) -> i32; + pub 
fn sceRtcTickAddWeeks( + dest_tick: *mut u64, + src_tick: *const u64, + num_weeks: u64, + ) -> i32; + pub fn sceRtcTickAddMonths( + dest_tick: *mut u64, + src_tick: *const u64, + num_months: u64, + ) -> i32; + pub fn sceRtcTickAddYears( + dest_tick: *mut u64, + src_tick: *const u64, + num_years: u64, + ) -> i32; + pub fn sceRtcSetTime_t(date: *mut ScePspDateTime, time: u32) -> i32; + pub fn sceRtcGetTime_t(date: *const ScePspDateTime, time: *mut u32) -> i32; + pub fn sceRtcSetTime64_t(date: *mut ScePspDateTime, time: u64) -> i32; + pub fn sceRtcGetTime64_t( + date: *const ScePspDateTime, time: *mut u64 + ) -> i32; + pub fn sceRtcSetDosTime(date: *mut ScePspDateTime, dos_time: u32) -> i32; + pub fn sceRtcGetDosTime(date: *mut ScePspDateTime, dos_time: u32) -> i32; + pub fn sceRtcSetWin32FileTime( + date: *mut ScePspDateTime, + time: *mut u64, + ) -> i32; + pub fn sceRtcGetWin32FileTime( + date: *mut ScePspDateTime, + time: *mut u64, + ) -> i32; + pub fn sceRtcParseDateTime( + dest_tick: *mut u64, + date_string: *const u8, + ) -> i32; + pub fn sceRtcFormatRFC3339( + psz_date_time: *mut char, + p_utc: *const u64, + time_zone_minutes: i32, + ) -> i32; + pub fn sceRtcFormatRFC3339LocalTime( + psz_date_time: *mut char, + p_utc: *const u64, + ) -> i32; + pub fn sceRtcParseRFC3339( + p_utc: *mut u64, + psz_date_time: *const u8, + ) -> i32; + pub fn sceRtcFormatRFC2822( + psz_date_time: *mut char, + p_utc: *const u64, + time_zone_minutes: i32, + ) -> i32; + pub fn sceRtcFormatRFC2822LocalTime( + psz_date_time: *mut char, + p_utc: *const u64, + ) -> i32; + + pub fn sceIoOpen( + file: *const u8, + flags: i32, + permissions: IoPermissions, + ) -> SceUid; + pub fn sceIoOpenAsync( + file: *const u8, + flags: i32, + permissions: IoPermissions, + ) -> SceUid; + pub fn sceIoClose(fd: SceUid) -> i32; + pub fn sceIoCloseAsync(fd: SceUid) -> i32; + pub fn sceIoRead(fd: SceUid, data: *mut c_void, size: u32) -> i32; + pub fn sceIoReadAsync(fd: SceUid, data: *mut c_void, size: u32) -> i32; + pub fn sceIoWrite(fd: SceUid, data: *const c_void, size: usize) -> i32; + pub fn sceIoWriteAsync(fd: SceUid, data: *const c_void, size: u32) -> i32; + pub fn sceIoLseek(fd: SceUid, offset: i64, whence: IoWhence) -> i64; + pub fn sceIoLseekAsync(fd: SceUid, offset: i64, whence: IoWhence) -> i32; + pub fn sceIoLseek32(fd: SceUid, offset: i32, whence: IoWhence) -> i32; + pub fn sceIoLseek32Async(fd: SceUid, offset: i32, whence: IoWhence) + -> i32; + pub fn sceIoRemove(file: *const u8) -> i32; + pub fn sceIoMkdir(dir: *const u8, mode: IoPermissions) -> i32; + pub fn sceIoRmdir(path: *const u8) -> i32; + pub fn sceIoChdir(path: *const u8) -> i32; + pub fn sceIoRename(oldname: *const u8, newname: *const u8) -> i32; + pub fn sceIoDopen(dirname: *const u8) -> SceUid; + pub fn sceIoDread(fd: SceUid, dir: *mut SceIoDirent) -> i32; + pub fn sceIoDclose(fd: SceUid) -> i32; + pub fn sceIoDevctl( + dev: *const u8, + cmd: u32, + indata: *mut c_void, + inlen: i32, + outdata: *mut c_void, + outlen: i32, + ) -> i32; + pub fn sceIoAssign( + dev1: *const u8, + dev2: *const u8, + dev3: *const u8, + mode: IoAssignPerms, + unk1: *mut c_void, + unk2: i32, + ) -> i32; + pub fn sceIoUnassign(dev: *const u8) -> i32; + pub fn sceIoGetstat(file: *const u8, stat: *mut SceIoStat) -> i32; + pub fn sceIoChstat( + file: *const u8, + stat: *mut SceIoStat, + bits: i32, + ) -> i32; + pub fn sceIoIoctl( + fd: SceUid, + cmd: u32, + indata: *mut c_void, + inlen: i32, + outdata: *mut c_void, + outlen: i32, + ) -> i32; + pub fn sceIoIoctlAsync( + fd: SceUid, + cmd: 
u32, + indata: *mut c_void, + inlen: i32, + outdata: *mut c_void, + outlen: i32, + ) -> i32; + pub fn sceIoSync(device: *const u8, unk: u32) -> i32; + pub fn sceIoWaitAsync(fd: SceUid, res: *mut i64) -> i32; + pub fn sceIoWaitAsyncCB(fd: SceUid, res: *mut i64) -> i32; + pub fn sceIoPollAsync(fd: SceUid, res: *mut i64) -> i32; + pub fn sceIoGetAsyncStat(fd: SceUid, poll: i32, res: *mut i64) -> i32; + pub fn sceIoCancel(fd: SceUid) -> i32; + pub fn sceIoGetDevType(fd: SceUid) -> i32; + pub fn sceIoChangeAsyncPriority(fd: SceUid, pri: i32) -> i32; + pub fn sceIoSetAsyncCallback( + fd: SceUid, + cb: SceUid, + argp: *mut c_void, + ) -> i32; + + pub fn sceJpegInitMJpeg() -> i32; + pub fn sceJpegFinishMJpeg() -> i32; + pub fn sceJpegCreateMJpeg(width: i32, height: i32) -> i32; + pub fn sceJpegDeleteMJpeg() -> i32; + pub fn sceJpegDecodeMJpeg( + jpeg_buf: *mut u8, + size: usize, + rgba: *mut c_void, + unk: u32, + ) -> i32; + + pub fn sceUmdCheckMedium() -> i32; + pub fn sceUmdGetDiscInfo(info: *mut UmdInfo) -> i32; + pub fn sceUmdActivate(unit: i32, drive: *const u8) -> i32; + pub fn sceUmdDeactivate(unit: i32, drive: *const u8) -> i32; + pub fn sceUmdWaitDriveStat(state: i32) -> i32; + pub fn sceUmdWaitDriveStatWithTimer(state: i32, timeout: u32) -> i32; + pub fn sceUmdWaitDriveStatCB(state: i32, timeout: u32) -> i32; + pub fn sceUmdCancelWaitDriveStat() -> i32; + pub fn sceUmdGetDriveStat() -> i32; + pub fn sceUmdGetErrorStat() -> i32; + pub fn sceUmdRegisterUMDCallBack(cbid: i32) -> i32; + pub fn sceUmdUnRegisterUMDCallBack(cbid: i32) -> i32; + pub fn sceUmdReplacePermit() -> i32; + pub fn sceUmdReplaceProhibit() -> i32; + + pub fn sceMpegInit() -> i32; + pub fn sceMpegFinish(); + pub fn sceMpegRingbufferQueryMemSize(packets: i32) -> i32; + pub fn sceMpegRingbufferConstruct( + ringbuffer: *mut SceMpegRingbuffer, + packets: i32, + data: *mut c_void, + size: i32, + callback: SceMpegRingbufferCb, + cb_param: *mut c_void, + ) -> i32; + pub fn sceMpegRingbufferDestruct(ringbuffer: *mut SceMpegRingbuffer); + pub fn sceMpegRingbufferAvailableSize( + ringbuffer: *mut SceMpegRingbuffer, + ) -> i32; + pub fn sceMpegRingbufferPut( + ringbuffer: *mut SceMpegRingbuffer, + num_packets: i32, + available: i32, + ) -> i32; + pub fn sceMpegQueryMemSize(unk: i32) -> i32; + pub fn sceMpegCreate( + handle: SceMpeg, + data: *mut c_void, + size: i32, + ringbuffer: *mut SceMpegRingbuffer, + frame_width: i32, + unk1: i32, + unk2: i32, + ) -> i32; + pub fn sceMpegDelete(handle: SceMpeg); + pub fn sceMpegQueryStreamOffset( + handle: SceMpeg, + buffer: *mut c_void, + offset: *mut i32, + ) -> i32; + pub fn sceMpegQueryStreamSize(buffer: *mut c_void, size: *mut i32) -> i32; + pub fn sceMpegRegistStream( + handle: SceMpeg, + stream_id: i32, + unk: i32, + ) -> SceMpegStream; + pub fn sceMpegUnRegistStream(handle: SceMpeg, stream: SceMpegStream); + pub fn sceMpegFlushAllStream(handle: SceMpeg) -> i32; + pub fn sceMpegMallocAvcEsBuf(handle: SceMpeg) -> *mut c_void; + pub fn sceMpegFreeAvcEsBuf(handle: SceMpeg, buf: *mut c_void); + pub fn sceMpegQueryAtracEsSize( + handle: SceMpeg, + es_size: *mut i32, + out_size: *mut i32, + ) -> i32; + pub fn sceMpegInitAu( + handle: SceMpeg, + es_buffer: *mut c_void, + au: *mut SceMpegAu, + ) -> i32; + pub fn sceMpegGetAvcAu( + handle: SceMpeg, + stream: SceMpegStream, + au: *mut SceMpegAu, + unk: *mut i32, + ) -> i32; + pub fn sceMpegAvcDecodeMode( + handle: SceMpeg, + mode: *mut SceMpegAvcMode, + ) -> i32; + pub fn sceMpegAvcDecode( + handle: SceMpeg, + au: *mut SceMpegAu, + iframe_width: 
i32, + buffer: *mut c_void, + init: *mut i32, + ) -> i32; + pub fn sceMpegAvcDecodeStop( + handle: SceMpeg, + frame_width: i32, + buffer: *mut c_void, + status: *mut i32, + ) -> i32; + pub fn sceMpegGetAtracAu( + handle: SceMpeg, + stream: SceMpegStream, + au: *mut SceMpegAu, + unk: *mut c_void, + ) -> i32; + pub fn sceMpegAtracDecode( + handle: SceMpeg, + au: *mut SceMpegAu, + buffer: *mut c_void, + init: i32, + ) -> i32; + + pub fn sceMpegBaseYCrCbCopyVme( + yuv_buffer: *mut c_void, + buffer: *mut i32, + type_: i32, + ) -> i32; + pub fn sceMpegBaseCscInit(width: i32) -> i32; + pub fn sceMpegBaseCscVme( + rgb_buffer: *mut c_void, + rgb_buffer2: *mut c_void, + width: i32, + y_cr_cb_buffer: *mut SceMpegYCrCbBuffer, + ) -> i32; + pub fn sceMpegbase_BEA18F91(lli: *mut SceMpegLLI) -> i32; + + pub fn sceHprmPeekCurrentKey(key: *mut i32) -> i32; + pub fn sceHprmPeekLatch(latch: *mut [u32; 4]) -> i32; + pub fn sceHprmReadLatch(latch: *mut [u32; 4]) -> i32; + pub fn sceHprmIsHeadphoneExist() -> i32; + pub fn sceHprmIsRemoteExist() -> i32; + pub fn sceHprmIsMicrophoneExist() -> i32; + + pub fn sceGuDepthBuffer(zbp: *mut c_void, zbw: i32); + pub fn sceGuDispBuffer( + width: i32, + height: i32, + dispbp: *mut c_void, + dispbw: i32, + ); + pub fn sceGuDrawBuffer( + psm: DisplayPixelFormat, + fbp: *mut c_void, + fbw: i32, + ); + pub fn sceGuDrawBufferList( + psm: DisplayPixelFormat, + fbp: *mut c_void, + fbw: i32, + ); + pub fn sceGuDisplay(state: bool) -> bool; + pub fn sceGuDepthFunc(function: DepthFunc); + pub fn sceGuDepthMask(mask: i32); + pub fn sceGuDepthOffset(offset: i32); + pub fn sceGuDepthRange(near: i32, far: i32); + pub fn sceGuFog(near: f32, far: f32, color: u32); + pub fn sceGuInit(); + pub fn sceGuTerm(); + pub fn sceGuBreak(mode: i32); + pub fn sceGuContinue(); + pub fn sceGuSetCallback( + signal: GuCallbackId, + callback: GuCallback, + ) -> GuCallback; + pub fn sceGuSignal(behavior: SignalBehavior, signal: i32); + pub fn sceGuSendCommandf(cmd: GeCommand, argument: f32); + pub fn sceGuSendCommandi(cmd: GeCommand, argument: i32); + pub fn sceGuGetMemory(size: i32) -> *mut c_void; + pub fn sceGuStart(context_type: GuContextType, list: *mut c_void); + pub fn sceGuFinish() -> i32; + pub fn sceGuFinishId(id: u32) -> i32; + pub fn sceGuCallList(list: *const c_void); + pub fn sceGuCallMode(mode: i32); + pub fn sceGuCheckList() -> i32; + pub fn sceGuSendList( + mode: GuQueueMode, + list: *const c_void, + context: *mut GeContext, + ); + pub fn sceGuSwapBuffers() -> *mut c_void; + pub fn sceGuSync( + mode: GuSyncMode, + behavior: GuSyncBehavior, + ) -> GeListState; + pub fn sceGuDrawArray( + prim: GuPrimitive, + vtype: i32, + count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuBeginObject( + vtype: i32, + count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuEndObject(); + pub fn sceGuSetStatus(state: GuState, status: i32); + pub fn sceGuGetStatus(state: GuState) -> bool; + pub fn sceGuSetAllStatus(status: i32); + pub fn sceGuGetAllStatus() -> i32; + pub fn sceGuEnable(state: GuState); + pub fn sceGuDisable(state: GuState); + pub fn sceGuLight( + light: i32, + type_: LightType, + components: i32, + position: &ScePspFVector3, + ); + pub fn sceGuLightAtt(light: i32, atten0: f32, atten1: f32, atten2: f32); + pub fn sceGuLightColor(light: i32, component: i32, color: u32); + pub fn sceGuLightMode(mode: LightMode); + pub fn sceGuLightSpot( + light: i32, + direction: &ScePspFVector3, + exponent: f32, + cutoff: f32, + ); + pub fn 
sceGuClear(flags: i32); + pub fn sceGuClearColor(color: u32); + pub fn sceGuClearDepth(depth: u32); + pub fn sceGuClearStencil(stencil: u32); + pub fn sceGuPixelMask(mask: u32); + pub fn sceGuColor(color: u32); + pub fn sceGuColorFunc(func: ColorFunc, color: u32, mask: u32); + pub fn sceGuColorMaterial(components: i32); + pub fn sceGuAlphaFunc(func: AlphaFunc, value: i32, mask: i32); + pub fn sceGuAmbient(color: u32); + pub fn sceGuAmbientColor(color: u32); + pub fn sceGuBlendFunc( + op: BlendOp, + src: BlendSrc, + dest: BlendDst, + src_fix: u32, + dest_fix: u32, + ); + pub fn sceGuMaterial(components: i32, color: u32); + pub fn sceGuModelColor( + emissive: u32, + ambient: u32, + diffuse: u32, + specular: u32, + ); + pub fn sceGuStencilFunc(func: StencilFunc, ref_: i32, mask: i32); + pub fn sceGuStencilOp( + fail: StencilOperation, + zfail: StencilOperation, + zpass: StencilOperation, + ); + pub fn sceGuSpecular(power: f32); + pub fn sceGuFrontFace(order: FrontFaceDirection); + pub fn sceGuLogicalOp(op: LogicalOperation); + pub fn sceGuSetDither(matrix: &ScePspIMatrix4); + pub fn sceGuShadeModel(mode: ShadingModel); + pub fn sceGuCopyImage( + psm: DisplayPixelFormat, + sx: i32, + sy: i32, + width: i32, + height: i32, + srcw: i32, + src: *mut c_void, + dx: i32, + dy: i32, + destw: i32, + dest: *mut c_void, + ); + pub fn sceGuTexEnvColor(color: u32); + pub fn sceGuTexFilter(min: TextureFilter, mag: TextureFilter); + pub fn sceGuTexFlush(); + pub fn sceGuTexFunc(tfx: TextureEffect, tcc: TextureColorComponent); + pub fn sceGuTexImage( + mipmap: MipmapLevel, + width: i32, + height: i32, + tbw: i32, + tbp: *const c_void, + ); + pub fn sceGuTexLevelMode(mode: TextureLevelMode, bias: f32); + pub fn sceGuTexMapMode(mode: TextureMapMode, a1: u32, a2: u32); + pub fn sceGuTexMode( + tpsm: TexturePixelFormat, + maxmips: i32, + a2: i32, + swizzle: i32, + ); + pub fn sceGuTexOffset(u: f32, v: f32); + pub fn sceGuTexProjMapMode(mode: TextureProjectionMapMode); + pub fn sceGuTexScale(u: f32, v: f32); + pub fn sceGuTexSlope(slope: f32); + pub fn sceGuTexSync(); + pub fn sceGuTexWrap(u: GuTexWrapMode, v: GuTexWrapMode); + pub fn sceGuClutLoad(num_blocks: i32, cbp: *const c_void); + pub fn sceGuClutMode( + cpsm: ClutPixelFormat, + shift: u32, + mask: u32, + a3: u32, + ); + pub fn sceGuOffset(x: u32, y: u32); + pub fn sceGuScissor(x: i32, y: i32, w: i32, h: i32); + pub fn sceGuViewport(cx: i32, cy: i32, width: i32, height: i32); + pub fn sceGuDrawBezier( + v_type: i32, + u_count: i32, + v_count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuPatchDivide(ulevel: u32, vlevel: u32); + pub fn sceGuPatchFrontFace(a0: u32); + pub fn sceGuPatchPrim(prim: PatchPrimitive); + pub fn sceGuDrawSpline( + v_type: i32, + u_count: i32, + v_count: i32, + u_edge: i32, + v_edge: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuSetMatrix(type_: MatrixMode, matrix: &ScePspFMatrix4); + pub fn sceGuBoneMatrix(index: u32, matrix: &ScePspFMatrix4); + pub fn sceGuMorphWeight(index: i32, weight: f32); + pub fn sceGuDrawArrayN( + primitive_type: GuPrimitive, + v_type: i32, + count: i32, + a3: i32, + indices: *const c_void, + vertices: *const c_void, + ); + + pub fn sceGumDrawArray( + prim: GuPrimitive, + v_type: i32, + count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumDrawArrayN( + prim: GuPrimitive, + v_type: i32, + count: i32, + a3: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumDrawBezier( + v_type: i32, + 
u_count: i32, + v_count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumDrawSpline( + v_type: i32, + u_count: i32, + v_count: i32, + u_edge: i32, + v_edge: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumFastInverse(); + pub fn sceGumFullInverse(); + pub fn sceGumLoadIdentity(); + pub fn sceGumLoadMatrix(m: &ScePspFMatrix4); + pub fn sceGumLookAt( + eye: &ScePspFVector3, + center: &ScePspFVector3, + up: &ScePspFVector3, + ); + pub fn sceGumMatrixMode(mode: MatrixMode); + pub fn sceGumMultMatrix(m: &ScePspFMatrix4); + pub fn sceGumOrtho( + left: f32, + right: f32, + bottom: f32, + top: f32, + near: f32, + far: f32, + ); + pub fn sceGumPerspective(fovy: f32, aspect: f32, near: f32, far: f32); + pub fn sceGumPopMatrix(); + pub fn sceGumPushMatrix(); + pub fn sceGumRotateX(angle: f32); + pub fn sceGumRotateY(angle: f32); + pub fn sceGumRotateZ(angle: f32); + pub fn sceGumRotateXYZ(v: &ScePspFVector3); + pub fn sceGumRotateZYX(v: &ScePspFVector3); + pub fn sceGumScale(v: &ScePspFVector3); + pub fn sceGumStoreMatrix(m: &mut ScePspFMatrix4); + pub fn sceGumTranslate(v: &ScePspFVector3); + pub fn sceGumUpdateMatrix(); + + pub fn sceMp3ReserveMp3Handle(args: *mut SceMp3InitArg) -> i32; + pub fn sceMp3ReleaseMp3Handle(handle: Mp3Handle) -> i32; + pub fn sceMp3InitResource() -> i32; + pub fn sceMp3TermResource() -> i32; + pub fn sceMp3Init(handle: Mp3Handle) -> i32; + pub fn sceMp3Decode(handle: Mp3Handle, dst: *mut *mut i16) -> i32; + pub fn sceMp3GetInfoToAddStreamData( + handle: Mp3Handle, + dst: *mut *mut u8, + to_write: *mut i32, + src_pos: *mut i32, + ) -> i32; + pub fn sceMp3NotifyAddStreamData(handle: Mp3Handle, size: i32) -> i32; + pub fn sceMp3CheckStreamDataNeeded(handle: Mp3Handle) -> i32; + pub fn sceMp3SetLoopNum(handle: Mp3Handle, loop_: i32) -> i32; + pub fn sceMp3GetLoopNum(handle: Mp3Handle) -> i32; + pub fn sceMp3GetSumDecodedSample(handle: Mp3Handle) -> i32; + pub fn sceMp3GetMaxOutputSample(handle: Mp3Handle) -> i32; + pub fn sceMp3GetSamplingRate(handle: Mp3Handle) -> i32; + pub fn sceMp3GetBitRate(handle: Mp3Handle) -> i32; + pub fn sceMp3GetMp3ChannelNum(handle: Mp3Handle) -> i32; + pub fn sceMp3ResetPlayPosition(handle: Mp3Handle) -> i32; + + pub fn sceRegOpenRegistry( + reg: *mut Key, + mode: i32, + handle: *mut RegHandle, + ) -> i32; + pub fn sceRegFlushRegistry(handle: RegHandle) -> i32; + pub fn sceRegCloseRegistry(handle: RegHandle) -> i32; + pub fn sceRegOpenCategory( + handle: RegHandle, + name: *const u8, + mode: i32, + dir_handle: *mut RegHandle, + ) -> i32; + pub fn sceRegRemoveCategory(handle: RegHandle, name: *const u8) -> i32; + pub fn sceRegCloseCategory(dir_handle: RegHandle) -> i32; + pub fn sceRegFlushCategory(dir_handle: RegHandle) -> i32; + pub fn sceRegGetKeyInfo( + dir_handle: RegHandle, + name: *const u8, + key_handle: *mut RegHandle, + type_: *mut KeyType, + size: *mut usize, + ) -> i32; + pub fn sceRegGetKeyInfoByName( + dir_handle: RegHandle, + name: *const u8, + type_: *mut KeyType, + size: *mut usize, + ) -> i32; + pub fn sceRegGetKeyValue( + dir_handle: RegHandle, + key_handle: RegHandle, + buf: *mut c_void, + size: usize, + ) -> i32; + pub fn sceRegGetKeyValueByName( + dir_handle: RegHandle, + name: *const u8, + buf: *mut c_void, + size: usize, + ) -> i32; + pub fn sceRegSetKeyValue( + dir_handle: RegHandle, + name: *const u8, + buf: *const c_void, + size: usize, + ) -> i32; + pub fn sceRegGetKeysNum(dir_handle: RegHandle, num: *mut i32) -> i32; + pub fn sceRegGetKeys(dir_handle: 
RegHandle, buf: *mut u8, num: i32) + -> i32; + pub fn sceRegCreateKey( + dir_handle: RegHandle, + name: *const u8, + type_: i32, + size: usize, + ) -> i32; + pub fn sceRegRemoveRegistry(key: *mut Key) -> i32; + + pub fn sceOpenPSIDGetOpenPSID(openpsid: *mut OpenPSID) -> i32; + + pub fn sceUtilityMsgDialogInitStart( + params: *mut UtilityMsgDialogParams, + ) -> i32; + pub fn sceUtilityMsgDialogShutdownStart(); + pub fn sceUtilityMsgDialogGetStatus() -> i32; + pub fn sceUtilityMsgDialogUpdate(n: i32); + pub fn sceUtilityMsgDialogAbort() -> i32; + pub fn sceUtilityNetconfInitStart(data: *mut UtilityNetconfData) -> i32; + pub fn sceUtilityNetconfShutdownStart() -> i32; + pub fn sceUtilityNetconfUpdate(unknown: i32) -> i32; + pub fn sceUtilityNetconfGetStatus() -> i32; + pub fn sceUtilityCheckNetParam(id: i32) -> i32; + pub fn sceUtilityGetNetParam( + conf: i32, + param: NetParam, + data: *mut UtilityNetData, + ) -> i32; + pub fn sceUtilitySavedataInitStart( + params: *mut SceUtilitySavedataParam, + ) -> i32; + pub fn sceUtilitySavedataGetStatus() -> i32; + pub fn sceUtilitySavedataShutdownStart() -> i32; + pub fn sceUtilitySavedataUpdate(unknown: i32); + pub fn sceUtilityGameSharingInitStart( + params: *mut UtilityGameSharingParams, + ) -> i32; + pub fn sceUtilityGameSharingShutdownStart(); + pub fn sceUtilityGameSharingGetStatus() -> i32; + pub fn sceUtilityGameSharingUpdate(n: i32); + pub fn sceUtilityHtmlViewerInitStart( + params: *mut UtilityHtmlViewerParam, + ) -> i32; + pub fn sceUtilityHtmlViewerShutdownStart() -> i32; + pub fn sceUtilityHtmlViewerUpdate(n: i32) -> i32; + pub fn sceUtilityHtmlViewerGetStatus() -> i32; + pub fn sceUtilitySetSystemParamInt(id: SystemParamId, value: i32) -> i32; + pub fn sceUtilitySetSystemParamString( + id: SystemParamId, + str: *const u8, + ) -> i32; + pub fn sceUtilityGetSystemParamInt( + id: SystemParamId, + value: *mut i32, + ) -> i32; + pub fn sceUtilityGetSystemParamString( + id: SystemParamId, + str: *mut u8, + len: i32, + ) -> i32; + pub fn sceUtilityOskInitStart(params: *mut SceUtilityOskParams) -> i32; + pub fn sceUtilityOskShutdownStart() -> i32; + pub fn sceUtilityOskUpdate(n: i32) -> i32; + pub fn sceUtilityOskGetStatus() -> i32; + pub fn sceUtilityLoadNetModule(module: NetModule) -> i32; + pub fn sceUtilityUnloadNetModule(module: NetModule) -> i32; + pub fn sceUtilityLoadAvModule(module: AvModule) -> i32; + pub fn sceUtilityUnloadAvModule(module: AvModule) -> i32; + pub fn sceUtilityLoadUsbModule(module: UsbModule) -> i32; + pub fn sceUtilityUnloadUsbModule(module: UsbModule) -> i32; + pub fn sceUtilityLoadModule(module: Module) -> i32; + pub fn sceUtilityUnloadModule(module: Module) -> i32; + pub fn sceUtilityCreateNetParam(conf: i32) -> i32; + pub fn sceUtilitySetNetParam(param: NetParam, val: *const c_void) -> i32; + pub fn sceUtilityCopyNetParam(src: i32, dest: i32) -> i32; + pub fn sceUtilityDeleteNetParam(conf: i32) -> i32; + + pub fn sceNetInit( + poolsize: i32, + calloutprio: i32, + calloutstack: i32, + netintrprio: i32, + netintrstack: i32, + ) -> i32; + pub fn sceNetTerm() -> i32; + pub fn sceNetFreeThreadinfo(thid: i32) -> i32; + pub fn sceNetThreadAbort(thid: i32) -> i32; + pub fn sceNetEtherStrton(name: *mut u8, mac: *mut u8); + pub fn sceNetEtherNtostr(mac: *mut u8, name: *mut u8); + pub fn sceNetGetLocalEtherAddr(mac: *mut u8) -> i32; + pub fn sceNetGetMallocStat(stat: *mut SceNetMallocStat) -> i32; + + pub fn sceNetAdhocctlInit( + stacksize: i32, + priority: i32, + adhoc_id: *mut SceNetAdhocctlAdhocId, + ) -> i32; + pub fn 
sceNetAdhocctlTerm() -> i32; + pub fn sceNetAdhocctlConnect(name: *const u8) -> i32; + pub fn sceNetAdhocctlDisconnect() -> i32; + pub fn sceNetAdhocctlGetState(event: *mut i32) -> i32; + pub fn sceNetAdhocctlCreate(name: *const u8) -> i32; + pub fn sceNetAdhocctlJoin(scaninfo: *mut SceNetAdhocctlScanInfo) -> i32; + pub fn sceNetAdhocctlGetAdhocId(id: *mut SceNetAdhocctlAdhocId) -> i32; + pub fn sceNetAdhocctlCreateEnterGameMode( + name: *const u8, + unknown: i32, + num: i32, + macs: *mut u8, + timeout: u32, + unknown2: i32, + ) -> i32; + pub fn sceNetAdhocctlJoinEnterGameMode( + name: *const u8, + hostmac: *mut u8, + timeout: u32, + unknown: i32, + ) -> i32; + pub fn sceNetAdhocctlGetGameModeInfo( + gamemodeinfo: *mut SceNetAdhocctlGameModeInfo, + ) -> i32; + pub fn sceNetAdhocctlExitGameMode() -> i32; + pub fn sceNetAdhocctlGetPeerList( + length: *mut i32, + buf: *mut c_void, + ) -> i32; + pub fn sceNetAdhocctlGetPeerInfo( + mac: *mut u8, + size: i32, + peerinfo: *mut SceNetAdhocctlPeerInfo, + ) -> i32; + pub fn sceNetAdhocctlScan() -> i32; + pub fn sceNetAdhocctlGetScanInfo( + length: *mut i32, + buf: *mut c_void, + ) -> i32; + pub fn sceNetAdhocctlAddHandler( + handler: SceNetAdhocctlHandler, + unknown: *mut c_void, + ) -> i32; + pub fn sceNetAdhocctlDelHandler(id: i32) -> i32; + pub fn sceNetAdhocctlGetNameByAddr(mac: *mut u8, nickname: *mut u8) + -> i32; + pub fn sceNetAdhocctlGetAddrByName( + nickname: *mut u8, + length: *mut i32, + buf: *mut c_void, + ) -> i32; + pub fn sceNetAdhocctlGetParameter( + params: *mut SceNetAdhocctlParams, + ) -> i32; + + pub fn sceNetAdhocInit() -> i32; + pub fn sceNetAdhocTerm() -> i32; + pub fn sceNetAdhocPdpCreate( + mac: *mut u8, + port: u16, + buf_size: u32, + unk1: i32, + ) -> i32; + pub fn sceNetAdhocPdpDelete(id: i32, unk1: i32) -> i32; + pub fn sceNetAdhocPdpSend( + id: i32, + dest_mac_addr: *mut u8, + port: u16, + data: *mut c_void, + len: u32, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPdpRecv( + id: i32, + src_mac_addr: *mut u8, + port: *mut u16, + data: *mut c_void, + data_length: *mut c_void, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocGetPdpStat( + size: *mut i32, + stat: *mut SceNetAdhocPdpStat, + ) -> i32; + pub fn sceNetAdhocGameModeCreateMaster( + data: *mut c_void, + size: i32, + ) -> i32; + pub fn sceNetAdhocGameModeCreateReplica( + mac: *mut u8, + data: *mut c_void, + size: i32, + ) -> i32; + pub fn sceNetAdhocGameModeUpdateMaster() -> i32; + pub fn sceNetAdhocGameModeUpdateReplica(id: i32, unk1: i32) -> i32; + pub fn sceNetAdhocGameModeDeleteMaster() -> i32; + pub fn sceNetAdhocGameModeDeleteReplica(id: i32) -> i32; + pub fn sceNetAdhocPtpOpen( + srcmac: *mut u8, + srcport: u16, + destmac: *mut u8, + destport: u16, + buf_size: u32, + delay: u32, + count: i32, + unk1: i32, + ) -> i32; + pub fn sceNetAdhocPtpConnect(id: i32, timeout: u32, nonblock: i32) -> i32; + pub fn sceNetAdhocPtpListen( + srcmac: *mut u8, + srcport: u16, + buf_size: u32, + delay: u32, + count: i32, + queue: i32, + unk1: i32, + ) -> i32; + pub fn sceNetAdhocPtpAccept( + id: i32, + mac: *mut u8, + port: *mut u16, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPtpSend( + id: i32, + data: *mut c_void, + data_size: *mut i32, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPtpRecv( + id: i32, + data: *mut c_void, + data_size: *mut i32, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPtpFlush(id: i32, timeout: u32, nonblock: i32) -> i32; + pub fn sceNetAdhocPtpClose(id: i32, 
unk1: i32) -> i32; + pub fn sceNetAdhocGetPtpStat( + size: *mut i32, + stat: *mut SceNetAdhocPtpStat, + ) -> i32; +} + +extern "C" { + pub fn sceNetAdhocMatchingInit(memsize: i32) -> i32; + pub fn sceNetAdhocMatchingTerm() -> i32; + pub fn sceNetAdhocMatchingCreate( + mode: AdhocMatchingMode, + max_peers: i32, + port: u16, + buf_size: i32, + hello_delay: u32, + ping_delay: u32, + init_count: i32, + msg_delay: u32, + callback: AdhocMatchingCallback, + ) -> i32; + pub fn sceNetAdhocMatchingDelete(matching_id: i32) -> i32; + pub fn sceNetAdhocMatchingStart( + matching_id: i32, + evth_pri: i32, + evth_stack: i32, + inth_pri: i32, + inth_stack: i32, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingStop(matching_id: i32) -> i32; + pub fn sceNetAdhocMatchingSelectTarget( + matching_id: i32, + mac: *mut u8, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingCancelTarget( + matching_id: i32, + mac: *mut u8, + ) -> i32; + pub fn sceNetAdhocMatchingCancelTargetWithOpt( + matching_id: i32, + mac: *mut u8, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingSendData( + matching_id: i32, + mac: *mut u8, + data_len: i32, + data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingAbortSendData( + matching_id: i32, + mac: *mut u8, + ) -> i32; + pub fn sceNetAdhocMatchingSetHelloOpt( + matching_id: i32, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingGetHelloOpt( + matching_id: i32, + opt_len: *mut i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingGetMembers( + matching_id: i32, + length: *mut i32, + buf: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingGetPoolMaxAlloc() -> i32; + pub fn sceNetAdhocMatchingGetPoolStat(poolstat: *mut AdhocPoolStat) + -> i32; +} + +extern "C" { + pub fn sceNetApctlInit(stack_size: i32, init_priority: i32) -> i32; + pub fn sceNetApctlTerm() -> i32; + pub fn sceNetApctlGetInfo( + code: ApctlInfo, + pinfo: *mut SceNetApctlInfo, + ) -> i32; + pub fn sceNetApctlAddHandler( + handler: SceNetApctlHandler, + parg: *mut c_void, + ) -> i32; + pub fn sceNetApctlDelHandler(handler_id: i32) -> i32; + pub fn sceNetApctlConnect(conn_index: i32) -> i32; + pub fn sceNetApctlDisconnect() -> i32; + pub fn sceNetApctlGetState(pstate: *mut ApctlState) -> i32; + + pub fn sceNetInetInit() -> i32; + pub fn sceNetInetTerm() -> i32; + pub fn sceNetInetAccept( + s: i32, + addr: *mut sockaddr, + addr_len: *mut socklen_t, + ) -> i32; + pub fn sceNetInetBind( + s: i32, + my_addr: *const sockaddr, + addr_len: socklen_t, + ) -> i32; + pub fn sceNetInetConnect( + s: i32, + serv_addr: *const sockaddr, + addr_len: socklen_t, + ) -> i32; + pub fn sceNetInetGetsockopt( + s: i32, + level: i32, + opt_name: i32, + opt_val: *mut c_void, + optl_en: *mut socklen_t, + ) -> i32; + pub fn sceNetInetListen(s: i32, backlog: i32) -> i32; + pub fn sceNetInetRecv( + s: i32, + buf: *mut c_void, + len: usize, + flags: i32, + ) -> usize; + pub fn sceNetInetRecvfrom( + s: i32, + buf: *mut c_void, + flags: usize, + arg1: i32, + from: *mut sockaddr, + from_len: *mut socklen_t, + ) -> usize; + pub fn sceNetInetSend( + s: i32, + buf: *const c_void, + len: usize, + flags: i32, + ) -> usize; + pub fn sceNetInetSendto( + s: i32, + buf: *const c_void, + len: usize, + flags: i32, + to: *const sockaddr, + to_len: socklen_t, + ) -> usize; + pub fn sceNetInetSetsockopt( + s: i32, + level: i32, + opt_name: i32, + opt_val: *const c_void, + opt_len: socklen_t, + ) -> i32; + pub fn 
sceNetInetShutdown(s: i32, how: i32) -> i32; + pub fn sceNetInetSocket(domain: i32, type_: i32, protocol: i32) -> i32; + pub fn sceNetInetClose(s: i32) -> i32; + pub fn sceNetInetGetErrno() -> i32; + + pub fn sceSslInit(unknown1: i32) -> i32; + pub fn sceSslEnd() -> i32; + pub fn sceSslGetUsedMemoryMax(memory: *mut u32) -> i32; + pub fn sceSslGetUsedMemoryCurrent(memory: *mut u32) -> i32; + + pub fn sceHttpInit(unknown1: u32) -> i32; + pub fn sceHttpEnd() -> i32; + pub fn sceHttpCreateTemplate( + agent: *mut u8, + unknown1: i32, + unknown2: i32, + ) -> i32; + pub fn sceHttpDeleteTemplate(templateid: i32) -> i32; + pub fn sceHttpCreateConnection( + templateid: i32, + host: *mut u8, + unknown1: *mut u8, + port: u16, + unknown2: i32, + ) -> i32; + pub fn sceHttpCreateConnectionWithURL( + templateid: i32, + url: *const u8, + unknown1: i32, + ) -> i32; + pub fn sceHttpDeleteConnection(connection_id: i32) -> i32; + pub fn sceHttpCreateRequest( + connection_id: i32, + method: HttpMethod, + path: *mut u8, + content_length: u64, + ) -> i32; + pub fn sceHttpCreateRequestWithURL( + connection_id: i32, + method: HttpMethod, + url: *mut u8, + content_length: u64, + ) -> i32; + pub fn sceHttpDeleteRequest(request_id: i32) -> i32; + pub fn sceHttpSendRequest( + request_id: i32, + data: *mut c_void, + data_size: u32, + ) -> i32; + pub fn sceHttpAbortRequest(request_id: i32) -> i32; + pub fn sceHttpReadData( + request_id: i32, + data: *mut c_void, + data_size: u32, + ) -> i32; + pub fn sceHttpGetContentLength( + request_id: i32, + content_length: *mut u64, + ) -> i32; + pub fn sceHttpGetStatusCode(request_id: i32, status_code: *mut i32) + -> i32; + pub fn sceHttpSetResolveTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpSetResolveRetry(id: i32, count: i32) -> i32; + pub fn sceHttpSetConnectTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpSetSendTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpSetRecvTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpEnableKeepAlive(id: i32) -> i32; + pub fn sceHttpDisableKeepAlive(id: i32) -> i32; + pub fn sceHttpEnableRedirect(id: i32) -> i32; + pub fn sceHttpDisableRedirect(id: i32) -> i32; + pub fn sceHttpEnableCookie(id: i32) -> i32; + pub fn sceHttpDisableCookie(id: i32) -> i32; + pub fn sceHttpSaveSystemCookie() -> i32; + pub fn sceHttpLoadSystemCookie() -> i32; + pub fn sceHttpAddExtraHeader( + id: i32, + name: *mut u8, + value: *mut u8, + unknown1: i32, + ) -> i32; + pub fn sceHttpDeleteHeader(id: i32, name: *const u8) -> i32; + pub fn sceHttpsInit( + unknown1: i32, + unknown2: i32, + unknown3: i32, + unknown4: i32, + ) -> i32; + pub fn sceHttpsEnd() -> i32; + pub fn sceHttpsLoadDefaultCert(unknown1: i32, unknown2: i32) -> i32; + pub fn sceHttpDisableAuth(id: i32) -> i32; + pub fn sceHttpDisableCache(id: i32) -> i32; + pub fn sceHttpEnableAuth(id: i32) -> i32; + pub fn sceHttpEnableCache(id: i32) -> i32; + pub fn sceHttpEndCache() -> i32; + pub fn sceHttpGetAllHeader( + request: i32, + header: *mut *mut u8, + header_size: *mut u32, + ) -> i32; + pub fn sceHttpGetNetworkErrno(request: i32, err_num: *mut i32) -> i32; + pub fn sceHttpGetProxy( + id: i32, + activate_flag: *mut i32, + mode: *mut i32, + proxy_host: *mut u8, + len: usize, + proxy_port: *mut u16, + ) -> i32; + pub fn sceHttpInitCache(max_size: usize) -> i32; + pub fn sceHttpSetAuthInfoCB(id: i32, cbfunc: HttpPasswordCB) -> i32; + pub fn sceHttpSetProxy( + id: i32, + activate_flag: i32, + mode: i32, + new_proxy_host: *const u8, + new_proxy_port: u16, + ) -> i32; + pub fn 
sceHttpSetResHeaderMaxSize(id: i32, header_size: u32) -> i32; + pub fn sceHttpSetMallocFunction( + malloc_func: HttpMallocFunction, + free_func: HttpFreeFunction, + realloc_func: HttpReallocFunction, + ) -> i32; + + pub fn sceNetResolverInit() -> i32; + pub fn sceNetResolverCreate( + rid: *mut i32, + buf: *mut c_void, + buf_length: u32, + ) -> i32; + pub fn sceNetResolverDelete(rid: i32) -> i32; + pub fn sceNetResolverStartNtoA( + rid: i32, + hostname: *const u8, + addr: *mut in_addr, + timeout: u32, + retry: i32, + ) -> i32; + pub fn sceNetResolverStartAtoN( + rid: i32, + addr: *const in_addr, + hostname: *mut u8, + hostname_len: u32, + timeout: u32, + retry: i32, + ) -> i32; + pub fn sceNetResolverStop(rid: i32) -> i32; + pub fn sceNetResolverTerm() -> i32; +} diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/apple/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/apple/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/apple/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/apple/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2,6 +2,7 @@ //! //! This covers *-apple-* triples currently pub type c_char = i8; +pub type wchar_t = i32; pub type clock_t = c_ulong; pub type time_t = c_long; pub type suseconds_t = i32; @@ -34,6 +35,8 @@ pub type sae_associd_t = u32; pub type sae_connid_t = u32; +pub type mach_port_t = ::c_uint; + deprecated_mach! { pub type vm_prot_t = ::c_int; pub type vm_size_t = ::uintptr_t; @@ -679,6 +682,14 @@ (*(self as *const siginfo_t as *const siginfo_timer)).si_value } + + pub unsafe fn si_pid(&self) -> ::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> ::uid_t { + self.si_uid + } } cfg_if! { @@ -1395,6 +1406,7 @@ pub const O_NOCTTY: ::c_int = 0x20000; pub const O_CLOEXEC: ::c_int = 0x1000000; pub const O_DIRECTORY: ::c_int = 0x100000; +pub const O_SYMLINK: ::c_int = 0x200000; pub const S_IFIFO: mode_t = 4096; pub const S_IFCHR: mode_t = 8192; pub const S_IFBLK: mode_t = 24576; @@ -3229,24 +3241,26 @@ (__DARWIN_ALIGN32(::mem::size_of::<::cmsghdr>()) + length as usize) as ::c_uint } +} - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { +safe_f! 
{ + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { status >> 8 } - pub fn _WSTATUS(status: ::c_int) -> ::c_int { + pub {const} fn _WSTATUS(status: ::c_int) -> ::c_int { status & 0x7f } - pub fn WIFCONTINUED(status: ::c_int) -> bool { + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) == 0x13 } - pub fn WIFSIGNALED(status: ::c_int) -> bool { + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { _WSTATUS(status) != _WSTOPPED && _WSTATUS(status) != 0 } - pub fn WIFSTOPPED(status: ::c_int) -> bool { + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) != 0x13 } } @@ -3394,6 +3408,7 @@ name: *mut ::c_char, len: ::size_t, ) -> ::c_int; + pub fn pthread_from_mach_thread_np(port: ::mach_port_t) -> ::pthread_t; pub fn pthread_get_stackaddr_np(thread: ::pthread_t) -> *mut ::c_void; pub fn pthread_get_stacksize_np(thread: ::pthread_t) -> ::size_t; pub fn pthread_condattr_setpshared( diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,6 +1,7 @@ // DragonFlyBSD's __error function is declared with "static inline", so it must // be implemented in the libc crate, as a pointer to a static thread_local. f! { + #[deprecated(since = "0.2.77", "Use `__errno_location()` instead")] pub fn __error() -> *mut ::c_int { &mut errno } diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,5 +1,6 @@ pub type dev_t = u32; pub type c_char = i8; +pub type wchar_t = i32; pub type clock_t = u64; pub type ino_t = u64; pub type lwpid_t = i32; @@ -459,21 +460,6 @@ pub const Q_GETQUOTA: ::c_int = 0x300; pub const Q_SETQUOTA: ::c_int = 0x400; -pub const CLOCK_REALTIME: ::clockid_t = 0; -pub const CLOCK_VIRTUAL: ::clockid_t = 1; -pub const CLOCK_PROF: ::clockid_t = 2; -pub const CLOCK_MONOTONIC: ::clockid_t = 4; -pub const CLOCK_UPTIME: ::clockid_t = 5; -pub const CLOCK_UPTIME_PRECISE: ::clockid_t = 7; -pub const CLOCK_UPTIME_FAST: ::clockid_t = 8; -pub const CLOCK_REALTIME_PRECISE: ::clockid_t = 9; -pub const CLOCK_REALTIME_FAST: ::clockid_t = 10; -pub const CLOCK_MONOTONIC_PRECISE: ::clockid_t = 11; -pub const CLOCK_MONOTONIC_FAST: ::clockid_t = 12; -pub const CLOCK_SECOND: ::clockid_t = 13; -pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 14; -pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 15; - pub const CTL_UNSPEC: ::c_int = 0; pub const CTL_KERN: ::c_int = 1; pub const CTL_VM: ::c_int = 2; @@ -1053,19 +1039,20 @@ } } +safe_f! 
{ + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 + } +} + extern "C" { + pub fn __errno_location() -> *mut ::c_int; pub fn setgrent(); pub fn mprotect( addr: *mut ::c_void, len: ::size_t, prot: ::c_int, ) -> ::c_int; - pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_settime( - clk_id: ::clockid_t, - tp: *const ::timespec, - ) -> ::c_int; pub fn setutxdb(_type: ::c_uint, file: *mut ::c_char) -> ::c_int; diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,34 +1,10 @@ pub type c_char = u8; pub type c_long = i64; pub type c_ulong = u64; +pub type wchar_t = u32; pub type time_t = i64; pub type suseconds_t = i64; - -s! { - pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::fflags_t, - pub st_gen: u32, - pub st_lspare: i32, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, - } -} +pub type register_t = i64; // should be pub(crate), but that requires Rust 1.18.0 cfg_if! { @@ -42,3 +18,4 @@ } pub const MAP_32BIT: ::c_int = 0x00080000; +pub const MINSIGSTKSZ: ::size_t = 4096; // 1024 * 4 diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ pub type c_char = u8; pub type c_long = i32; pub type c_ulong = u32; +pub type wchar_t = u32; pub type time_t = i64; pub type suseconds_t = i32; +pub type register_t = i32; s! 
{ pub struct stat { @@ -45,3 +47,4 @@ } } pub const MAP_32BIT: ::c_int = 0x00080000; +pub const MINSIGSTKSZ: ::size_t = 4096; // 1024 * 4 diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,32 @@ +#[repr(C)] +#[cfg_attr(feature = "extra_traits", derive(Debug, Eq, Hash, PartialEq))] +pub struct stat { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::dev_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_size: ::off_t, + pub st_blocks: ::blkcnt_t, + pub st_blksize: ::blksize_t, + pub st_flags: ::fflags_t, + pub st_gen: u32, + pub st_lspare: i32, + pub st_birthtime: ::time_t, + pub st_birthtime_nsec: ::c_long, +} + +impl ::Copy for ::stat {} +impl ::Clone for ::stat { + fn clone(&self) -> ::stat { + *self + } +} diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -217,8 +217,9 @@ } cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; + if #[cfg(any(target_arch = "x86_64", + target_arch = "aarch64"))] { + mod b64; + pub use self::b64::*; } } diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/x86_64.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/x86_64.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/x86_64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/x86_64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -#[repr(C)] -#[cfg_attr(feature = "extra_traits", derive(Debug, Eq, Hash, PartialEq))] -pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_mode: ::mode_t, - pub st_nlink: ::nlink_t, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::fflags_t, - pub st_gen: u32, - pub st_lspare: i32, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, -} - -impl ::Copy for ::stat {} -impl ::Clone for ::stat { - fn clone(&self) -> ::stat { - *self - } -} diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/b64.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/b64.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/b64.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/b64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,34 @@ +#[repr(C)] +#[cfg_attr(feature = 
"extra_traits", derive(Debug, Eq, Hash, PartialEq))] +pub struct stat { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_nlink: ::nlink_t, + pub st_mode: ::mode_t, + st_padding0: i16, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + st_padding1: i32, + pub st_rdev: ::dev_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + pub st_birthtime: ::time_t, + pub st_birthtime_nsec: ::c_long, + pub st_size: ::off_t, + pub st_blocks: ::blkcnt_t, + pub st_blksize: ::blksize_t, + pub st_flags: ::fflags_t, + pub st_gen: u64, + pub st_spare: [u64; 10], +} + +impl ::Copy for ::stat {} +impl ::Clone for ::stat { + fn clone(&self) -> ::stat { + *self + } +} diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -190,6 +190,13 @@ } } +pub const F_ADD_SEALS: ::c_int = 19; +pub const F_GET_SEALS: ::c_int = 20; +pub const F_SEAL_SEAL: ::c_int = 0x0001; +pub const F_SEAL_SHRINK: ::c_int = 0x0002; +pub const F_SEAL_GROW: ::c_int = 0x0004; +pub const F_SEAL_WRITE: ::c_int = 0x0008; + cfg_if! { if #[cfg(not(freebsd13))] { pub const ELAST: ::c_int = 96; @@ -217,8 +224,9 @@ } cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; + if #[cfg(any(target_arch = "x86_64", + target_arch = "aarch64"))] { + mod b64; + pub use self::b64::*; } } diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -#[repr(C)] -#[cfg_attr(feature = "extra_traits", derive(Debug, Eq, Hash, PartialEq))] -pub struct stat { - pub st_dev: ::dev_t, - pub st_ino: ::ino_t, - pub st_nlink: ::nlink_t, - pub st_mode: ::mode_t, - st_padding0: i16, - pub st_uid: ::uid_t, - pub st_gid: ::gid_t, - st_padding1: i32, - pub st_rdev: ::dev_t, - pub st_atime: ::time_t, - pub st_atime_nsec: ::c_long, - pub st_mtime: ::time_t, - pub st_mtime_nsec: ::c_long, - pub st_ctime: ::time_t, - pub st_ctime_nsec: ::c_long, - pub st_birthtime: ::time_t, - pub st_birthtime_nsec: ::c_long, - pub st_size: ::off_t, - pub st_blocks: ::blkcnt_t, - pub st_blksize: ::blksize_t, - pub st_flags: ::fflags_t, - pub st_gen: u64, - pub st_spare: [u64; 10], -} - -impl ::Copy for ::stat {} -impl ::Clone for ::stat { - fn clone(&self) -> ::stat { - *self - } -} diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -116,6 +116,19 @@ pub sc_ngroups: ::c_int, pub sc_groups: [::gid_t; 1], } + + pub struct ptrace_vm_entry { + pub pve_entry: ::c_int, + pub pve_timestamp: ::c_int, + pub pve_start: ::c_ulong, + pub pve_end: ::c_ulong, + pub pve_offset: 
::c_ulong, + pub pve_prot: ::c_uint, + pub pve_pathlen: ::c_uint, + pub pve_fileid: ::c_long, + pub pve_fsid: u32, + pub pve_path: *mut ::c_char, + } } s_no_extra_traits! { @@ -322,10 +335,17 @@ pub const EXTATTR_NAMESPACE_USER: ::c_int = 1; pub const EXTATTR_NAMESPACE_SYSTEM: ::c_int = 2; -pub const RAND_MAX: ::c_int = 0x7fff_fffd; -pub const PTHREAD_STACK_MIN: ::size_t = 2048; +cfg_if! { + if #[cfg(any(freebsd10, freebsd11, freebsd12))] { + pub const RAND_MAX: ::c_int = 0x7fff_fffd; + } else { + pub const RAND_MAX: ::c_int = 0x7fff_ffff; + } +} + +pub const PTHREAD_STACK_MIN: ::size_t = MINSIGSTKSZ; pub const PTHREAD_MUTEX_ADAPTIVE_NP: ::c_int = 4; -pub const SIGSTKSZ: ::size_t = 34816; +pub const SIGSTKSZ: ::size_t = MINSIGSTKSZ + 32768; pub const SF_NODISKIO: ::c_int = 0x00000001; pub const SF_MNOWAIT: ::c_int = 0x00000002; pub const SF_SYNC: ::c_int = 0x00000004; @@ -427,22 +447,13 @@ pub const MADV_PROTECT: ::c_int = 10; pub const RUSAGE_THREAD: ::c_int = 1; -pub const CLOCK_REALTIME: ::clockid_t = 0; -pub const CLOCK_VIRTUAL: ::clockid_t = 1; -pub const CLOCK_PROF: ::clockid_t = 2; -pub const CLOCK_MONOTONIC: ::clockid_t = 4; -pub const CLOCK_UPTIME: ::clockid_t = 5; -pub const CLOCK_UPTIME_PRECISE: ::clockid_t = 7; -pub const CLOCK_UPTIME_FAST: ::clockid_t = 8; -pub const CLOCK_REALTIME_PRECISE: ::clockid_t = 9; -pub const CLOCK_REALTIME_FAST: ::clockid_t = 10; -pub const CLOCK_MONOTONIC_PRECISE: ::clockid_t = 11; -pub const CLOCK_MONOTONIC_FAST: ::clockid_t = 12; -pub const CLOCK_SECOND: ::clockid_t = 13; -pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 14; -pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 15; - +#[doc(hidden)] +#[deprecated( + since = "0.2.72", + note = "CTL_UNSPEC is deprecated. Use CTL_SYSCTL instead" +)] pub const CTL_UNSPEC: ::c_int = 0; +pub const CTL_SYSCTL: ::c_int = 0; pub const CTL_KERN: ::c_int = 1; pub const CTL_VM: ::c_int = 2; pub const CTL_VFS: ::c_int = 3; @@ -452,6 +463,13 @@ pub const CTL_MACHDEP: ::c_int = 7; pub const CTL_USER: ::c_int = 8; pub const CTL_P1003_1B: ::c_int = 9; +pub const CTL_SYSCTL_DEBUG: ::c_int = 0; +pub const CTL_SYSCTL_NAME: ::c_int = 1; +pub const CTL_SYSCTL_NEXT: ::c_int = 2; +pub const CTL_SYSCTL_NAME2OID: ::c_int = 3; +pub const CTL_SYSCTL_OIDFMT: ::c_int = 4; +pub const CTL_SYSCTL_OIDDESCR: ::c_int = 5; +pub const CTL_SYSCTL_OIDLABEL: ::c_int = 6; pub const KERN_OSTYPE: ::c_int = 1; pub const KERN_OSRELEASE: ::c_int = 2; pub const KERN_OSREV: ::c_int = 3; @@ -769,8 +787,14 @@ pub const IPPROTO_NSP: ::c_int = 31; /// Merit Internodal pub const IPPROTO_INP: ::c_int = 32; -/// Sequential Exchange +#[doc(hidden)] +#[deprecated( + since = "0.2.72", + note = "IPPROTO_SEP is deprecated. 
Use IPPROTO_DCCP instead" +)] pub const IPPROTO_SEP: ::c_int = 33; +/// Datagram Congestion Control Protocol +pub const IPPROTO_DCCP: ::c_int = 33; /// Third Party Connect pub const IPPROTO_3PC: ::c_int = 34; /// InterDomain Policy Routing @@ -1017,6 +1041,7 @@ #[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] pub const USER_MAXID: ::c_int = 21; #[doc(hidden)] +#[deprecated(since = "0.2.74", note = "Removed in FreeBSD 13")] pub const CTL_P1003_1B_MAXID: ::c_int = 26; pub const MSG_NOTIFICATION: ::c_int = 0x00002000; @@ -1115,6 +1140,15 @@ pub const UF_HIDDEN: ::c_ulong = 0x00008000; pub const SF_SNAPSHOT: ::c_ulong = 0x00200000; +pub const F_OGETLK: ::c_int = 7; +pub const F_OSETLK: ::c_int = 8; +pub const F_OSETLKW: ::c_int = 9; +pub const F_DUP2FD: ::c_int = 10; +pub const F_SETLK_REMOTE: ::c_int = 14; +pub const F_READAHEAD: ::c_int = 15; +pub const F_RDAHEAD: ::c_int = 16; +pub const F_DUP2FD_CLOEXEC: ::c_int = 18; + fn _ALIGN(p: usize) -> usize { (p + _ALIGNBYTES) & !_ALIGNBYTES } @@ -1166,16 +1200,15 @@ } } +safe_f! { + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 && status != 0x13 + } +} + extern "C" { pub fn __error() -> *mut ::c_int; - pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; - pub fn clock_settime( - clk_id: ::clockid_t, - tp: *const ::timespec, - ) -> ::c_int; - pub fn extattr_delete_fd( fd: ::c_int, attrnamespace: ::c_int, diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ pub type c_char = u8; pub type c_long = i64; pub type c_ulong = u64; +pub type wchar_t = i32; pub type time_t = i64; pub type suseconds_t = i64; +pub type register_t = i64; s! { pub struct stat { @@ -42,3 +44,4 @@ } pub const MAP_32BIT: ::c_int = 0x00080000; +pub const MINSIGSTKSZ: ::size_t = 2048; // 512 * 4 diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/align.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/align.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/align.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/align.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,197 @@ +use {c_long, register_t}; + s_no_extra_traits! 
{ #[allow(missing_debug_implementations)] #[repr(align(16))] pub struct max_align_t { priv_: [f64; 4] } + + #[repr(align(16))] + pub struct mcontext_t { + pub mc_onstack: register_t, + pub mc_rdi: register_t, + pub mc_rsi: register_t, + pub mc_rdx: register_t, + pub mc_rcx: register_t, + pub mc_r8: register_t, + pub mc_r9: register_t, + pub mc_rax: register_t, + pub mc_rbx: register_t, + pub mc_rbp: register_t, + pub mc_r10: register_t, + pub mc_r11: register_t, + pub mc_r12: register_t, + pub mc_r13: register_t, + pub mc_r14: register_t, + pub mc_r15: register_t, + pub mc_trapno: u32, + pub mc_fs: u16, + pub mc_gs: u16, + pub mc_addr: register_t, + pub mc_flags: u32, + pub mc_es: u16, + pub mc_ds: u16, + pub mc_err: register_t, + pub mc_rip: register_t, + pub mc_cs: register_t, + pub mc_rflags: register_t, + pub mc_rsp: register_t, + pub mc_ss: register_t, + pub mc_len: c_long, + pub mc_fpformat: c_long, + pub mc_ownedfp: c_long, + pub mc_fpstate: [c_long; 64], + pub mc_fsbase: register_t, + pub mc_gsbase: register_t, + pub mc_xfpustate: register_t, + pub mc_xfpustate_len: register_t, + pub mc_spare: [c_long; 4], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_onstack == other.mc_onstack && + self.mc_rdi == other.mc_rdi && + self.mc_rsi == other.mc_rsi && + self.mc_rdx == other.mc_rdx && + self.mc_rcx == other.mc_rcx && + self.mc_r8 == other.mc_r8 && + self.mc_r9 == other.mc_r9 && + self.mc_rax == other.mc_rax && + self.mc_rbx == other.mc_rbx && + self.mc_rbp == other.mc_rbp && + self.mc_r10 == other.mc_r10 && + self.mc_r11 == other.mc_r11 && + self.mc_r12 == other.mc_r12 && + self.mc_r13 == other.mc_r13 && + self.mc_r14 == other.mc_r14 && + self.mc_r15 == other.mc_r15 && + self.mc_trapno == other.mc_trapno && + self.mc_fs == other.mc_fs && + self.mc_gs == other.mc_gs && + self.mc_addr == other.mc_addr && + self.mc_flags == other.mc_flags && + self.mc_es == other.mc_es && + self.mc_ds == other.mc_ds && + self.mc_err == other.mc_err && + self.mc_rip == other.mc_rip && + self.mc_cs == other.mc_cs && + self.mc_rflags == other.mc_rflags && + self.mc_rsp == other.mc_rsp && + self.mc_ss == other.mc_ss && + self.mc_len == other.mc_len && + self.mc_fpformat == other.mc_fpformat && + self.mc_ownedfp == other.mc_ownedfp && + self.mc_fpstate.iter().zip(other.mc_fpstate.iter()) + .all(|(a, b)| a == b) && + self.mc_fsbase == other.mc_fsbase && + self.mc_gsbase == other.mc_gsbase && + self.mc_xfpustate == other.mc_xfpustate && + self.mc_xfpustate_len == other.mc_xfpustate_len && + self.mc_spare == other.mc_spare + } + } + impl Eq for mcontext_t {} + impl ::fmt::Debug for mcontext_t { + fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { + f.debug_struct("mcontext_t") + .field("mc_onstack", &self.mc_onstack) + .field("mc_rdi", &self.mc_rdi) + .field("mc_rsi", &self.mc_rsi) + .field("mc_rdx", &self.mc_rdx) + .field("mc_rcx", &self.mc_rcx) + .field("mc_r8", &self.mc_r8) + .field("mc_r9", &self.mc_r9) + .field("mc_rax", &self.mc_rax) + .field("mc_rbx", &self.mc_rbx) + .field("mc_rbp", &self.mc_rbp) + .field("mc_r10", &self.mc_r10) + .field("mc_r11", &self.mc_r11) + .field("mc_r12", &self.mc_r12) + .field("mc_r13", &self.mc_r13) + .field("mc_r14", &self.mc_r14) + .field("mc_r15", &self.mc_r15) + .field("mc_trapno", &self.mc_trapno) + .field("mc_fs", &self.mc_fs) + .field("mc_gs", &self.mc_gs) + .field("mc_addr", &self.mc_addr) + .field("mc_flags", &self.mc_flags) + .field("mc_es", &self.mc_es) + 
.field("mc_ds", &self.mc_ds) + .field("mc_err", &self.mc_err) + .field("mc_rip", &self.mc_rip) + .field("mc_cs", &self.mc_cs) + .field("mc_rflags", &self.mc_rflags) + .field("mc_rsp", &self.mc_rsp) + .field("mc_ss", &self.mc_ss) + .field("mc_len", &self.mc_len) + .field("mc_fpformat", &self.mc_fpformat) + .field("mc_ownedfp", &self.mc_ownedfp) + // FIXME: .field("mc_fpstate", &self.mc_fpstate) + .field("mc_fsbase", &self.mc_fsbase) + .field("mc_gsbase", &self.mc_gsbase) + .field("mc_xfpustate", &self.mc_xfpustate) + .field("mc_xfpustate_len", &self.mc_xfpustate_len) + .field("mc_spare", &self.mc_spare) + .finish() + } + } + impl ::hash::Hash for mcontext_t { + fn hash(&self, state: &mut H) { + self.mc_onstack.hash(state); + self.mc_rdi.hash(state); + self.mc_rsi.hash(state); + self.mc_rdx.hash(state); + self.mc_rcx.hash(state); + self.mc_r8.hash(state); + self.mc_r9.hash(state); + self.mc_rax.hash(state); + self.mc_rbx.hash(state); + self.mc_rbp.hash(state); + self.mc_r10.hash(state); + self.mc_r11.hash(state); + self.mc_r12.hash(state); + self.mc_r13.hash(state); + self.mc_r14.hash(state); + self.mc_r15.hash(state); + self.mc_trapno.hash(state); + self.mc_fs.hash(state); + self.mc_gs.hash(state); + self.mc_addr.hash(state); + self.mc_flags.hash(state); + self.mc_es.hash(state); + self.mc_ds.hash(state); + self.mc_err.hash(state); + self.mc_rip.hash(state); + self.mc_cs.hash(state); + self.mc_rflags.hash(state); + self.mc_rsp.hash(state); + self.mc_ss.hash(state); + self.mc_len.hash(state); + self.mc_fpformat.hash(state); + self.mc_ownedfp.hash(state); + self.mc_fpstate.hash(state); + self.mc_fsbase.hash(state); + self.mc_gsbase.hash(state); + self.mc_xfpustate.hash(state); + self.mc_xfpustate_len.hash(state); + self.mc_spare.hash(state); + } + } + } +} + +s! { + pub struct ucontext_t { + pub uc_sigmask: ::sigset_t, + pub uc_mcontext: ::mcontext_t, + pub uc_link: *mut ::ucontext_t, + pub uc_stack: ::stack_t, + pub uc_flags: ::c_int, + __spare__: [::c_int; 4], + } } diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,180 @@ pub type c_char = i8; pub type c_long = i64; pub type c_ulong = u64; +pub type wchar_t = i32; pub type time_t = i64; pub type suseconds_t = i64; +pub type register_t = i64; + +s! { + pub struct reg32 { + pub r_fs: u32, + pub r_es: u32, + pub r_ds: u32, + pub r_edi: u32, + pub r_esi: u32, + pub r_ebp: u32, + pub r_isp: u32, + pub r_ebx: u32, + pub r_edx: u32, + pub r_ecx: u32, + pub r_eax: u32, + pub r_trapno: u32, + pub r_err: u32, + pub r_eip: u32, + pub r_cs: u32, + pub r_eflags: u32, + pub r_esp: u32, + pub r_ss: u32, + pub r_gs: u32, + } + + pub struct reg { + pub r_r15: i64, + pub r_r14: i64, + pub r_r13: i64, + pub r_r12: i64, + pub r_r11: i64, + pub r_r10: i64, + pub r_r9: i64, + pub r_r8: i64, + pub r_rdi: i64, + pub r_rsi: i64, + pub r_rbp: i64, + pub r_rbx: i64, + pub r_rdx: i64, + pub r_rcx: i64, + pub r_rax: i64, + pub r_trapno: u32, + pub r_fs: u16, + pub r_gs: u16, + pub r_err: u32, + pub r_es: u16, + pub r_ds: u16, + pub r_rip: i64, + pub r_cs: i64, + pub r_rflags: i64, + pub r_rsp: i64, + pub r_ss: i64, + } +} + +s_no_extra_traits! 
{ + pub struct fpreg32 { + pub fpr_env: [u32; 7], + pub fpr_acc: [[u8; 10]; 8], + pub fpr_ex_sw: u32, + pub fpr_pad: [u8; 64], + } + + pub struct fpreg { + pub fpr_env: [u64; 4], + pub fpr_acc: [[u8; 16]; 8], + pub fpr_xacc: [[u8; 16]; 16], + pub fpr_spare: [u64; 12], + } + + pub struct xmmreg { + pub xmm_env: [u32; 8], + pub xmm_acc: [[u8; 16]; 8], + pub xmm_reg: [[u8; 16]; 8], + pub xmm_pad: [u8; 224], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for fpreg32 { + fn eq(&self, other: &fpreg32) -> bool { + self.fpr_env == other.fpr_env && + self.fpr_acc == other.fpr_acc && + self.fpr_ex_sw == other.fpr_ex_sw && + self.fpr_pad + .iter() + .zip(other.fpr_pad.iter()) + .all(|(a,b)| a == b) + } + } + impl Eq for fpreg32 {} + impl ::fmt::Debug for fpreg32 { + fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { + f.debug_struct("fpreg32") + .field("fpr_env", &&self.fpr_env[..]) + .field("fpr_acc", &self.fpr_acc) + .field("fpr_ex_sw", &self.fpr_ex_sw) + .field("fpr_pad", &&self.fpr_pad[..]) + .finish() + } + } + impl ::hash::Hash for fpreg32 { + fn hash(&self, state: &mut H) { + self.fpr_env.hash(state); + self.fpr_acc.hash(state); + self.fpr_ex_sw.hash(state); + self.fpr_pad.hash(state); + } + } + + impl PartialEq for fpreg { + fn eq(&self, other: &fpreg) -> bool { + self.fpr_env == other.fpr_env && + self.fpr_acc == other.fpr_acc && + self.fpr_xacc == other.fpr_xacc && + self.fpr_spare == other.fpr_spare + } + } + impl Eq for fpreg {} + impl ::fmt::Debug for fpreg { + fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { + f.debug_struct("fpreg") + .field("fpr_env", &self.fpr_env) + .field("fpr_acc", &self.fpr_acc) + .field("fpr_xacc", &self.fpr_xacc) + .field("fpr_spare", &self.fpr_spare) + .finish() + } + } + impl ::hash::Hash for fpreg { + fn hash(&self, state: &mut H) { + self.fpr_env.hash(state); + self.fpr_acc.hash(state); + self.fpr_xacc.hash(state); + self.fpr_spare.hash(state); + } + } + + impl PartialEq for xmmreg { + fn eq(&self, other: &xmmreg) -> bool { + self.xmm_env == other.xmm_env && + self.xmm_acc == other.xmm_acc && + self.xmm_reg == other.xmm_reg && + self.xmm_pad + .iter() + .zip(other.xmm_pad.iter()) + .all(|(a,b)| a == b) + } + } + impl Eq for xmmreg {} + impl ::fmt::Debug for xmmreg { + fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { + f.debug_struct("xmmreg") + .field("xmm_env", &self.xmm_env) + .field("xmm_acc", &self.xmm_acc) + .field("xmm_reg", &self.xmm_reg) + .field("xmm_pad", &&self.xmm_pad[..]) + .finish() + } + } + impl ::hash::Hash for xmmreg { + fn hash(&self, state: &mut H) { + self.xmm_env.hash(state); + self.xmm_acc.hash(state); + self.xmm_reg.hash(state); + self.xmm_pad.hash(state); + } + } + } +} // should be pub(crate), but that requires Rust 1.18.0 cfg_if! { @@ -15,6 +187,18 @@ } } pub const MAP_32BIT: ::c_int = 0x00080000; +pub const MINSIGSTKSZ: ::size_t = 2048; // 512 * 4 + +pub const _MC_HASSEGS: u32 = 0x1; +pub const _MC_HASBASES: u32 = 0x2; +pub const _MC_HASFPXSTATE: u32 = 0x4; +pub const _MC_FLAG_MASK: u32 = _MC_HASSEGS | _MC_HASBASES | _MC_HASFPXSTATE; + +pub const _MC_FPFMT_NODEV: c_long = 0x10000; +pub const _MC_FPFMT_XMM: c_long = 0x10002; +pub const _MC_FPOWNED_NONE: c_long = 0x20000; +pub const _MC_FPOWNED_FPU: c_long = 0x20001; +pub const _MC_FPOWNED_PCB: c_long = 0x20002; cfg_if! 
{ if #[cfg(libc_align)] { diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ pub type c_char = i8; pub type c_long = i32; pub type c_ulong = u32; +pub type wchar_t = i32; pub type time_t = i32; pub type suseconds_t = i32; +pub type register_t = i32; s! { pub struct stat { @@ -38,6 +40,7 @@ pub const _ALIGNBYTES: usize = ::mem::size_of::<::c_long>() - 1; } else { #[doc(hidden)] - pub const _ALIGNBYTES: usize = 8 - 1; + pub const _ALIGNBYTES: usize = 4 - 1; } } +pub const MINSIGSTKSZ: ::size_t = 2048; // 512 * 4 diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/freebsdlike/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/freebsdlike/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -31,6 +31,14 @@ pub unsafe fn si_value(&self) -> ::sigval { self.si_value } + + pub unsafe fn si_pid(&self) -> ::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> ::uid_t { + self.si_uid + } } s! { @@ -218,6 +226,13 @@ pub tai: ::c_long, pub time_state: ::c_int, } + + pub struct ptrace_io_desc { + pub piod_op: ::c_int, + pub piod_offs: *mut ::c_void, + pub piod_addr: *mut ::c_void, + pub piod_len: ::size_t, + } } s_no_extra_traits! { @@ -623,6 +638,21 @@ pub const RUSAGE_SELF: ::c_int = 0; pub const RUSAGE_CHILDREN: ::c_int = -1; +pub const CLOCK_REALTIME: ::clockid_t = 0; +pub const CLOCK_VIRTUAL: ::clockid_t = 1; +pub const CLOCK_PROF: ::clockid_t = 2; +pub const CLOCK_MONOTONIC: ::clockid_t = 4; +pub const CLOCK_UPTIME: ::clockid_t = 5; +pub const CLOCK_UPTIME_PRECISE: ::clockid_t = 7; +pub const CLOCK_UPTIME_FAST: ::clockid_t = 8; +pub const CLOCK_REALTIME_PRECISE: ::clockid_t = 9; +pub const CLOCK_REALTIME_FAST: ::clockid_t = 10; +pub const CLOCK_MONOTONIC_PRECISE: ::clockid_t = 11; +pub const CLOCK_MONOTONIC_FAST: ::clockid_t = 12; +pub const CLOCK_SECOND: ::clockid_t = 13; +pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 14; +pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 15; + pub const MADV_NORMAL: ::c_int = 0; pub const MADV_RANDOM: ::c_int = 1; pub const MADV_SEQUENTIAL: ::c_int = 2; @@ -713,6 +743,11 @@ pub const PF_ATM: ::c_int = AF_ATM; pub const PF_NETGRAPH: ::c_int = AF_NETGRAPH; +pub const PIOD_READ_D: ::c_int = 1; +pub const PIOD_WRITE_D: ::c_int = 2; +pub const PIOD_READ_I: ::c_int = 3; +pub const PIOD_WRITE_I: ::c_int = 4; + pub const PT_TRACE_ME: ::c_int = 0; pub const PT_READ_I: ::c_int = 1; pub const PT_READ_D: ::c_int = 2; @@ -1185,20 +1220,16 @@ pub const REG_ENOSYS: ::c_int = -1; pub const REG_ILLSEQ: ::c_int = 17; -f! { - pub fn WIFCONTINUED(status: ::c_int) -> bool { +safe_f! 
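A brief usage sketch, assuming the libc crate as a dependency: the CLOCK_* constants relocated into the shared freebsdlike module above are consumed through clock_gettime() exactly as before.

    use std::mem;

    fn main() {
        // Zero-initialised out-parameter for the clock reading.
        let mut ts: libc::timespec = unsafe { mem::zeroed() };
        let rc = unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut ts) };
        assert_eq!(rc, 0);
        println!("monotonic clock: {}.{:09}s", ts.tv_sec, ts.tv_nsec);
    }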
{ + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { status == 0x13 } - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { status >> 8 } - pub fn WIFSIGNALED(status: ::c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { (status & 0o177) == 0o177 } } @@ -1219,17 +1250,6 @@ addrlen: *mut ::socklen_t, flags: ::c_int, ) -> ::c_int; - pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; - pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: ::c_int, - timeout: *const ::timespec, - ) -> ::c_int; - pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; pub fn chflags(path: *const ::c_char, flags: ::c_ulong) -> ::c_int; pub fn chflagsat( fd: ::c_int, @@ -1237,6 +1257,18 @@ flags: ::c_ulong, atflag: ::c_int, ) -> ::c_int; + + pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; + pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; + pub fn clock_settime( + clk_id: ::clockid_t, + tp: *const ::timespec, + ) -> ::c_int; + pub fn clock_getcpuclockid( + pid: ::pid_t, + clk_id: *mut ::clockid_t, + ) -> ::c_int; + pub fn dirfd(dirp: *mut ::DIR) -> ::c_int; pub fn duplocale(base: ::locale_t) -> ::locale_t; pub fn endutxent(); @@ -1315,43 +1347,6 @@ mode: ::mode_t, dev: dev_t, ) -> ::c_int; - pub fn mq_close(mqd: ::mqd_t) -> ::c_int; - pub fn mq_getattr(mqd: ::mqd_t, attr: *mut ::mq_attr) -> ::c_int; - pub fn mq_notify(mqd: ::mqd_t, notification: *const ::sigevent) - -> ::c_int; - pub fn mq_open(name: *const ::c_char, oflag: ::c_int, ...) -> ::mqd_t; - pub fn mq_receive( - mqd: ::mqd_t, - msg_ptr: *mut ::c_char, - msg_len: ::size_t, - msg_prio: *mut ::c_uint, - ) -> ::ssize_t; - pub fn mq_send( - mqd: ::mqd_t, - msg_ptr: *const ::c_char, - msg_len: ::size_t, - msg_prio: ::c_uint, - ) -> ::c_int; - pub fn mq_setattr( - mqd: ::mqd_t, - newattr: *const ::mq_attr, - oldattr: *mut ::mq_attr, - ) -> ::c_int; - pub fn mq_timedreceive( - mqd: ::mqd_t, - msg_ptr: *mut ::c_char, - msg_len: ::size_t, - msg_prio: *mut ::c_uint, - abs_timeout: *const ::timespec, - ) -> ::ssize_t; - pub fn mq_timedsend( - mqd: ::mqd_t, - msg_ptr: *const ::c_char, - msg_len: ::size_t, - msg_prio: ::c_uint, - abs_timeout: *const ::timespec, - ) -> ::c_int; - pub fn mq_unlink(name: *const ::c_char) -> ::c_int; pub fn mincore( addr: *const ::c_void, len: ::size_t, @@ -1521,6 +1516,58 @@ pub fn ntp_gettime(buf: *mut ntptimeval) -> ::c_int; } +#[link(name = "rt")] +extern "C" { + pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; + pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; + pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; + pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; + pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: ::c_int, + timeout: *const ::timespec, + ) -> ::c_int; + pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; + pub fn mq_close(mqd: ::mqd_t) -> ::c_int; + pub fn mq_getattr(mqd: ::mqd_t, attr: *mut ::mq_attr) -> ::c_int; + pub fn mq_notify(mqd: ::mqd_t, notification: *const ::sigevent) + -> ::c_int; + pub fn mq_open(name: *const ::c_char, oflag: ::c_int, ...) 
-> ::mqd_t; + pub fn mq_receive( + mqd: ::mqd_t, + msg_ptr: *mut ::c_char, + msg_len: ::size_t, + msg_prio: *mut ::c_uint, + ) -> ::ssize_t; + pub fn mq_send( + mqd: ::mqd_t, + msg_ptr: *const ::c_char, + msg_len: ::size_t, + msg_prio: ::c_uint, + ) -> ::c_int; + pub fn mq_setattr( + mqd: ::mqd_t, + newattr: *const ::mq_attr, + oldattr: *mut ::mq_attr, + ) -> ::c_int; + pub fn mq_timedreceive( + mqd: ::mqd_t, + msg_ptr: *mut ::c_char, + msg_len: ::size_t, + msg_prio: *mut ::c_uint, + abs_timeout: *const ::timespec, + ) -> ::ssize_t; + pub fn mq_timedsend( + mqd: ::mqd_t, + msg_ptr: *const ::c_char, + msg_len: ::size_t, + msg_prio: ::c_uint, + abs_timeout: *const ::timespec, + ) -> ::c_int; + pub fn mq_unlink(name: *const ::c_char) -> ::c_int; +} + #[link(name = "util")] extern "C" { pub fn openpty( diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,3 @@ -pub type wchar_t = i32; pub type off_t = i64; pub type useconds_t = u32; pub type blkcnt_t = i64; @@ -255,6 +254,8 @@ pub const PATH_MAX: ::c_int = 1024; +pub const IOV_MAX: ::c_int = 1024; + pub const SA_ONSTACK: ::c_int = 0x0001; pub const SA_SIGINFO: ::c_int = 0x0040; pub const SA_RESTART: ::c_int = 0x0002; @@ -505,6 +506,10 @@ pub const PRIO_PGRP: ::c_int = 1; pub const PRIO_USER: ::c_int = 2; +pub const ITIMER_REAL: ::c_int = 0; +pub const ITIMER_VIRTUAL: ::c_int = 1; +pub const ITIMER_PROF: ::c_int = 2; + f! { pub fn CMSG_FIRSTHDR(mhdr: *const ::msghdr) -> *mut ::cmsghdr { if (*mhdr).msg_controllen as usize >= ::mem::size_of::<::cmsghdr>() { @@ -539,24 +544,26 @@ *slot = 0; } } +} - pub fn WTERMSIG(status: ::c_int) -> ::c_int { +safe_f! 
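A short sketch, again assuming the libc crate: the ITIMER_* constants added above pair with the getitimer()/setitimer() bindings declared a little further down; reading the real-time interval timer looks like this.

    use std::mem;

    fn main() {
        let mut it: libc::itimerval = unsafe { mem::zeroed() };
        // ITIMER_REAL counts down in wall-clock time and delivers SIGALRM.
        let rc = unsafe { libc::getitimer(libc::ITIMER_REAL, &mut it) };
        assert_eq!(rc, 0);
        println!("it_value = {}s {}us", it.it_value.tv_sec, it.it_value.tv_usec);
    }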
{ + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { status & 0o177 } - pub fn WIFEXITED(status: ::c_int) -> bool { + pub {const} fn WIFEXITED(status: ::c_int) -> bool { (status & 0o177) == 0 } - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { status >> 8 } - pub fn WCOREDUMP(status: ::c_int) -> bool { + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { (status & 0o200) != 0 } - pub fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { + pub {const} fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { (cmd << 8) | (type_ & 0x00ff) } } @@ -581,7 +588,15 @@ pub fn abs(i: ::c_int) -> ::c_int; pub fn atof(s: *const ::c_char) -> ::c_double; pub fn labs(i: ::c_long) -> ::c_long; + #[cfg_attr( + all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), + link_name = "rand@FBSD_1.0" + )] pub fn rand() -> ::c_int; + #[cfg_attr( + all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), + link_name = "srand@FBSD_1.0" + )] pub fn srand(seed: ::c_uint); pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; @@ -840,6 +855,23 @@ options: ::c_int, rusage: *mut ::rusage, ) -> ::pid_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "getitimer$UNIX2003" + )] + pub fn getitimer( + which: ::c_int, + curr_value: *mut ::itimerval + ) -> ::c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "setitimer$UNIX2003" + )] + pub fn setitimer( + which: ::c_int, + new_value: *const ::itimerval, + old_value: *mut ::itimerval, + ) -> ::c_int; pub fn regcomp( preg: *mut regex_t, diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/netbsdlike/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/netbsdlike/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/netbsdlike/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/netbsdlike/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,4 @@ +pub type wchar_t = i32; pub type time_t = i64; pub type mode_t = u32; pub type nlink_t = u32; @@ -74,6 +75,13 @@ pub seq: ::c_ushort, pub key: ::key_t, } + + pub struct ptrace_io_desc { + pub piod_op: ::c_int, + pub piod_offs: *mut ::c_void, + pub piod_addr: *mut ::c_void, + pub piod_len: ::size_t, + } } pub const D_T_FMT: ::nl_item = 0; @@ -342,6 +350,12 @@ pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; +pub const PIOD_READ_D: ::c_int = 1; +pub const PIOD_WRITE_D: ::c_int = 2; +pub const PIOD_READ_I: ::c_int = 3; +pub const PIOD_WRITE_I: ::c_int = 4; +pub const PIOD_READ_AUXV: ::c_int = 5; + pub const PT_TRACE_ME: ::c_int = 0; pub const PT_READ_I: ::c_int = 1; pub const PT_READ_D: ::c_int = 2; diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -92,7 +92,7 @@ pub st_spare: [u32; 2], } - pub struct addrinfo { + pub struct addrinfo { pub ai_flags: ::c_int, pub ai_family: ::c_int, pub ai_socktype: ::c_int, @@ -286,7 +286,7 @@ pub struct __exit_status { pub e_termination: u16, pub e_exit: u16, - } + } pub struct shmid_ds { pub shm_perm: ::ipc_perm, @@ -1699,18 +1699,6 @@ as ::c_uint } - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - status >> 8 - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 
- } - - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0o177) == 0o177 - } - // dirfd() is a macro on netbsd to access // the first field of the struct where dirp points to: // http://cvsweb.netbsd.org/bsdweb.cgi/src/include/dirent.h?rev=1.36 @@ -1718,10 +1706,6 @@ *(dirp as *const ::c_int) } - pub fn WIFCONTINUED(status: ::c_int) -> bool { - status == 0xffff - } - pub fn SOCKCREDSIZE(ngrps: usize) -> usize { let ngrps = if ngrps > 0 { ngrps - 1 @@ -1732,6 +1716,24 @@ } } +safe_f! { + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { + status >> 8 + } + + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 + } + + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { + (status & 0o177) == 0o177 + } + + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { + status == 0xffff + } +} + extern "C" { pub fn ntp_adjtime(buf: *mut timex) -> ::c_int; pub fn ntp_gettime(buf: *mut ntptimeval) -> ::c_int; diff -Nru cargo-0.44.1/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs cargo-0.47.0/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1340,7 +1340,7 @@ } f! { - pub fn WIFCONTINUED(status: ::c_int) -> bool { + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { status & 0o177777 == 0o177777 } @@ -1375,17 +1375,19 @@ (_ALIGN(::mem::size_of::<::cmsghdr>()) + _ALIGN(length as usize)) as ::c_uint } +} - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { +safe_f! { + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { status >> 8 } - pub fn WIFSIGNALED(status: ::c_int) -> bool { + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { (status & 0o177) != 0o177 && (status & 0o177) != 0 } - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0o177) == 0o177 + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { + (status & 0xff) == 0o177 } } diff -Nru cargo-0.44.1/vendor/libc/src/unix/haiku/mod.rs cargo-0.47.0/vendor/libc/src/unix/haiku/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/haiku/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/haiku/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -38,6 +38,20 @@ } } +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut ::c_void { + self.si_addr + } + + pub unsafe fn si_pid(&self) -> ::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> ::uid_t { + self.si_uid + } +} + s! { pub struct in_addr { pub s_addr: ::in_addr_t, @@ -962,6 +976,10 @@ pub const _SC_TIMERS: ::c_int = 58; pub const _SC_CPUTIME: ::c_int = 59; pub const _SC_THREAD_CPUTIME: ::c_int = 60; +pub const _SC_HOST_NAME_MAX: ::c_int = 61; +pub const _SC_REGEXP: ::c_int = 62; +pub const _SC_SYMLOOP_MAX: ::c_int = 63; +pub const _SC_SHELL: ::c_int = 64; pub const PTHREAD_STACK_MIN: ::size_t = 8192; @@ -1236,37 +1254,39 @@ *slot = 0; } } +} - pub fn WIFEXITED(status: ::c_int) -> bool { +safe_f! 
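A usage sketch, assuming the libc crate: the new _SC_* values for Haiku are queried the usual way through sysconf(); the same call compiles on other unices that already define the constant.

    fn main() {
        let max = unsafe { libc::sysconf(libc::_SC_HOST_NAME_MAX) };
        // -1 can mean "no limit" or "unsupported"; check errno in real code.
        println!("HOST_NAME_MAX = {}", max);
    }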
{ + pub {const} fn WIFEXITED(status: ::c_int) -> bool { (status & !0xff) == 0 } - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { status & 0xff } - pub fn WIFSIGNALED(status: ::c_int) -> bool { + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { ((status >> 8) & 0xff) != 0 } - pub fn WTERMSIG(status: ::c_int) -> ::c_int { + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { (status >> 8) & 0xff } - pub fn WIFSTOPPED(status: ::c_int) -> bool { + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { ((status >> 16) & 0xff) != 0 } - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { (status >> 16) & 0xff } // actually WIFCORED, but this is used everywhere else - pub fn WCOREDUMP(status: ::c_int) -> bool { + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { (status & 0x10000) != 0 } - pub fn WIFCONTINUED(status: ::c_int) -> bool { + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { (status & 0x20000) != 0 } } @@ -1274,6 +1294,13 @@ extern "C" { pub fn getrlimit(resource: ::c_int, rlim: *mut ::rlimit) -> ::c_int; pub fn setrlimit(resource: ::c_int, rlim: *const ::rlimit) -> ::c_int; + pub fn getpriority(which: ::c_int, who: id_t) -> ::c_int; + pub fn setpriority( + which: ::c_int, + who: id_t, + priority: ::c_int, + ) -> ::c_int; + pub fn utimensat( fd: ::c_int, path: *const ::c_char, diff -Nru cargo-0.44.1/vendor/libc/src/unix/hermit/mod.rs cargo-0.47.0/vendor/libc/src/unix/hermit/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/hermit/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/hermit/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -943,16 +943,16 @@ pub const PRIO_PGRP: ::c_int = 1; pub const PRIO_USER: ::c_int = 2; -f! { - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { +safe_f! { + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { (status >> 8) & 0xff } - pub fn WIFEXITED(status: ::c_int) -> bool { + pub {const} fn WIFEXITED(status: ::c_int) -> bool { (status & 0xff) == 0 } - pub fn WTERMSIG(status: ::c_int) -> ::c_int { + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { status & 0x7f } } diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/android/b64/aarch64/align.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/android/b64/aarch64/align.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/android/b64/aarch64/align.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/android/b64/aarch64/align.rs 2020-10-01 21:38:28.000000000 +0000 @@ -5,3 +5,25 @@ priv_: [f32; 8] } } + +s! 
{ + pub struct ucontext_t { + pub uc_flags: ::c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: ::stack_t, + pub uc_sigmask: ::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub fault_address: ::c_ulonglong, + pub regs: [::c_ulonglong; 31], + pub sp: ::c_ulonglong, + pub pc: ::c_ulonglong, + pub pstate: ::c_ulonglong, + // nested arrays to get the right size/length while being able to + // auto-derive traits like Debug + __reserved: [[u64; 32]; 16], + } +} diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -322,7 +322,7 @@ pub const SYS_pkey_mprotect: ::c_long = 288; pub const SYS_pkey_alloc: ::c_long = 289; pub const SYS_pkey_free: ::c_long = 290; -pub const SYS_syscalls: ::c_long = 292; +pub const SYS_syscalls: ::c_long = 436; cfg_if! { if #[cfg(libc_align)] { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/android/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/android/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/android/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/android/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -19,6 +19,7 @@ pub type rlim_t = ::c_ulong; pub type dev_t = ::c_ulong; pub type ino_t = ::c_ulong; +pub type ino64_t = u64; pub type __CPU_BITTYPE = ::c_ulong; pub type idtype_t = ::c_int; pub type loff_t = ::c_longlong; @@ -147,6 +148,11 @@ _pad: [u8; 28], } + pub struct itimerspec { + pub it_interval: ::timespec, + pub it_value: ::timespec, + } + pub struct ucred { pub pid: ::pid_t, pub uid: ::uid_t, @@ -218,6 +224,26 @@ pub ee_info: u32, pub ee_data: u32, } + + pub struct regex_t { + re_magic: ::c_int, + re_nsub: ::size_t, + re_endp: *const ::c_char, + re_guts: *mut ::c_void, + } + + pub struct regmatch_t { + pub rm_so: ::ssize_t, + pub rm_eo: ::ssize_t, + } + + pub struct sockaddr_vm { + pub svm_family: ::sa_family_t, + pub svm_reserved1: ::c_ushort, + pub svm_port: ::c_uint, + pub svm_cid: ::c_uint, + pub svm_zero: [u8; 4] + } } s_no_extra_traits! 
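A hedged sketch, assuming the libc crate on a Linux/Android target with vsock support: the sockaddr_vm struct added above is filled in like this to dial the host side; the port number is made up and error handling is omitted.

    use std::mem;

    fn main() {
        // socket() may fail where AF_VSOCK is unavailable; a real program checks fd.
        let fd = unsafe { libc::socket(libc::AF_VSOCK, libc::SOCK_STREAM, 0) };
        let mut addr: libc::sockaddr_vm = unsafe { mem::zeroed() };
        addr.svm_family = libc::AF_VSOCK as libc::sa_family_t;
        addr.svm_cid = libc::VMADDR_CID_HOST;
        addr.svm_port = 5000; // hypothetical service port
        let rc = unsafe {
            libc::connect(
                fd,
                &addr as *const _ as *const libc::sockaddr,
                mem::size_of::<libc::sockaddr_vm>() as libc::socklen_t,
            )
        };
        println!("connect returned {}", rc);
    }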
{ @@ -629,11 +655,27 @@ pub const EPOLLRDHUP: ::c_int = 0x00002000; pub const EPOLLWAKEUP: ::c_int = 0x20000000; -pub const EFD_CLOEXEC: ::c_int = 0x80000; +// sys/eventfd.h +pub const EFD_SEMAPHORE: ::c_int = 0x1; +pub const EFD_CLOEXEC: ::c_int = O_CLOEXEC; +pub const EFD_NONBLOCK: ::c_int = O_NONBLOCK; + +// sys/timerfd.h +pub const TFD_CLOEXEC: ::c_int = O_CLOEXEC; +pub const TFD_NONBLOCK: ::c_int = O_NONBLOCK; +pub const TFD_TIMER_ABSTIME: ::c_int = 1; +pub const TFD_TIMER_CANCEL_ON_SET: ::c_int = 2; pub const USER_PROCESS: ::c_short = 7; +// linux/falloc.h +pub const FALLOC_FL_KEEP_SIZE: ::c_int = 0x01; +pub const FALLOC_FL_PUNCH_HOLE: ::c_int = 0x02; +pub const FALLOC_FL_NO_HIDE_STALE: ::c_int = 0x04; pub const FALLOC_FL_COLLAPSE_RANGE: ::c_int = 0x08; +pub const FALLOC_FL_ZERO_RANGE: ::c_int = 0x10; +pub const FALLOC_FL_INSERT_RANGE: ::c_int = 0x20; +pub const FALLOC_FL_UNSHARE_RANGE: ::c_int = 0x40; pub const BUFSIZ: ::c_uint = 1024; pub const FILENAME_MAX: ::c_uint = 4096; @@ -804,6 +846,11 @@ pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; +// stdio.h +pub const RENAME_NOREPLACE: ::c_int = 1; +pub const RENAME_EXCHANGE: ::c_int = 2; +pub const RENAME_WHITEOUT: ::c_int = 4; + pub const FIOCLEX: ::c_int = 0x5451; pub const FIONCLEX: ::c_int = 0x5450; @@ -1035,6 +1082,13 @@ pub const O_DSYNC: ::c_int = 4096; pub const NI_MAXHOST: ::size_t = 1025; +pub const NI_MAXSERV: ::size_t = 32; + +pub const NI_NOFQDN: ::c_int = 0x00000001; +pub const NI_NUMERICHOST: ::c_int = 0x00000002; +pub const NI_NAMEREQD: ::c_int = 0x00000004; +pub const NI_NUMERICSERV: ::c_int = 0x00000008; +pub const NI_DGRAM: ::c_int = 0x00000010; pub const NCCS: usize = 19; pub const TCSBRKP: ::c_int = 0x5425; @@ -1097,7 +1151,7 @@ pub const PTRACE_GETSIGINFO: ::c_int = 0x4202; pub const PTRACE_SETSIGINFO: ::c_int = 0x4203; -pub const EFD_NONBLOCK: ::c_int = 0x800; +pub const PTRACE_EVENT_STOP: ::c_int = 128; pub const F_GETLK: ::c_int = 5; pub const F_GETOWN: ::c_int = 9; @@ -1207,6 +1261,41 @@ pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2; pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543; +pub const REG_BASIC: ::c_int = 0; +pub const REG_EXTENDED: ::c_int = 1; +pub const REG_ICASE: ::c_int = 2; +pub const REG_NOSUB: ::c_int = 4; +pub const REG_NEWLINE: ::c_int = 8; +pub const REG_NOSPEC: ::c_int = 16; +pub const REG_PEND: ::c_int = 32; +pub const REG_DUMP: ::c_int = 128; + +pub const REG_NOMATCH: ::c_int = 1; +pub const REG_BADPAT: ::c_int = 2; +pub const REG_ECOLLATE: ::c_int = 3; +pub const REG_ECTYPE: ::c_int = 4; +pub const REG_EESCAPE: ::c_int = 5; +pub const REG_ESUBREG: ::c_int = 6; +pub const REG_EBRACK: ::c_int = 7; +pub const REG_EPAREN: ::c_int = 8; +pub const REG_EBRACE: ::c_int = 9; +pub const REG_BADBR: ::c_int = 10; +pub const REG_ERANGE: ::c_int = 11; +pub const REG_ESPACE: ::c_int = 12; +pub const REG_BADRPT: ::c_int = 13; +pub const REG_EMPTY: ::c_int = 14; +pub const REG_ASSERT: ::c_int = 15; +pub const REG_INVARG: ::c_int = 16; +pub const REG_ATOI: ::c_int = 255; +pub const REG_ITOA: ::c_int = 256; + +pub const REG_NOTBOL: ::c_int = 1; +pub const REG_NOTEOL: ::c_int = 2; +pub const REG_STARTEND: ::c_int = 4; +pub const REG_TRACE: ::c_int = 256; +pub const REG_LARGE: ::c_int = 512; +pub const REG_BACKR: ::c_int = 1024; + pub const MCL_CURRENT: ::c_int = 0x0001; pub const MCL_FUTURE: ::c_int = 0x0002; @@ -2148,6 +2237,24 @@ pub const PRIO_PGRP: ::c_int = 1; pub const PRIO_USER: ::c_int = 2; +// linux/sched.h 
+pub const SCHED_NORMAL: ::c_int = 0; +pub const SCHED_FIFO: ::c_int = 1; +pub const SCHED_RR: ::c_int = 2; +pub const SCHED_BATCH: ::c_int = 3; +pub const SCHED_IDLE: ::c_int = 5; +pub const SCHED_DEADLINE: ::c_int = 6; + +// bits/seek_constants.h +pub const SEEK_DATA: ::c_int = 3; +pub const SEEK_HOLE: ::c_int = 4; + +// sys/socket.h +pub const AF_NFC: ::c_int = 39; +pub const AF_VSOCK: ::c_int = 40; +pub const PF_NFC: ::c_int = AF_NFC; +pub const PF_VSOCK: ::c_int = AF_VSOCK; + f! { pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { @@ -2218,6 +2325,18 @@ pub fn setrlimit64(resource: ::c_int, rlim: *const rlimit64) -> ::c_int; pub fn getrlimit(resource: ::c_int, rlim: *mut ::rlimit) -> ::c_int; pub fn setrlimit(resource: ::c_int, rlim: *const ::rlimit) -> ::c_int; + pub fn prlimit( + pid: ::pid_t, + resource: ::c_int, + new_limit: *const ::rlimit, + old_limit: *mut ::rlimit, + ) -> ::c_int; + pub fn prlimit64( + pid: ::pid_t, + resource: ::c_int, + new_limit: *const ::rlimit64, + old_limit: *mut ::rlimit64, + ) -> ::c_int; pub fn strerror_r( errnum: ::c_int, buf: *mut c_char, @@ -2273,16 +2392,101 @@ pub fn setutent(); pub fn getutent() -> *mut utmp; + pub fn fallocate( + fd: ::c_int, + mode: ::c_int, + offset: ::off_t, + len: ::off_t, + ) -> ::c_int; + pub fn fallocate64( + fd: ::c_int, + mode: ::c_int, + offset: ::off64_t, + len: ::off64_t, + ) -> ::c_int; pub fn posix_fallocate( fd: ::c_int, offset: ::off_t, len: ::off_t, ) -> ::c_int; + pub fn posix_fallocate64( + fd: ::c_int, + offset: ::off64_t, + len: ::off64_t, + ) -> ::c_int; + pub fn getxattr( + path: *const c_char, + name: *const c_char, + value: *mut ::c_void, + size: ::size_t, + ) -> ::ssize_t; + pub fn lgetxattr( + path: *const c_char, + name: *const c_char, + value: *mut ::c_void, + size: ::size_t, + ) -> ::ssize_t; + pub fn fgetxattr( + filedes: ::c_int, + name: *const c_char, + value: *mut ::c_void, + size: ::size_t, + ) -> ::ssize_t; + pub fn setxattr( + path: *const c_char, + name: *const c_char, + value: *const ::c_void, + size: ::size_t, + flags: ::c_int, + ) -> ::c_int; + pub fn lsetxattr( + path: *const c_char, + name: *const c_char, + value: *const ::c_void, + size: ::size_t, + flags: ::c_int, + ) -> ::c_int; + pub fn fsetxattr( + filedes: ::c_int, + name: *const c_char, + value: *const ::c_void, + size: ::size_t, + flags: ::c_int, + ) -> ::c_int; + pub fn listxattr( + path: *const c_char, + list: *mut c_char, + size: ::size_t, + ) -> ::ssize_t; + pub fn llistxattr( + path: *const c_char, + list: *mut c_char, + size: ::size_t, + ) -> ::ssize_t; + pub fn flistxattr( + filedes: ::c_int, + list: *mut c_char, + size: ::size_t, + ) -> ::ssize_t; + pub fn removexattr(path: *const c_char, name: *const c_char) -> ::c_int; + pub fn lremovexattr(path: *const c_char, name: *const c_char) -> ::c_int; + pub fn fremovexattr(filedes: ::c_int, name: *const c_char) -> ::c_int; pub fn signalfd( fd: ::c_int, mask: *const ::sigset_t, flags: ::c_int, ) -> ::c_int; + pub fn timerfd_create(clock: ::clockid_t, flags: ::c_int) -> ::c_int; + pub fn timerfd_gettime( + fd: ::c_int, + current_value: *mut itimerspec, + ) -> ::c_int; + pub fn timerfd_settime( + fd: ::c_int, + flags: ::c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> ::c_int; pub fn syscall(num: ::c_long, ...) 
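A short sketch, assuming the libc crate on Linux/Android: the timerfd bindings declared above are armed with the itimerspec struct added earlier in this hunk.

    use std::{mem, ptr};

    fn main() {
        let fd = unsafe { libc::timerfd_create(libc::CLOCK_MONOTONIC, libc::TFD_CLOEXEC) };
        assert!(fd >= 0);
        let mut spec: libc::itimerspec = unsafe { mem::zeroed() };
        spec.it_value.tv_sec = 1; // expire once, one second from now
        let rc = unsafe { libc::timerfd_settime(fd, 0, &spec, ptr::null_mut()) };
        assert_eq!(rc, 0);
        unsafe { libc::close(fd) };
    }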
-> ::c_long; pub fn sched_getaffinity( pid: ::pid_t, @@ -2460,6 +2664,11 @@ buflen: ::size_t, result: *mut *mut passwd, ) -> ::c_int; + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const ::timespec, + ) -> ::c_int; pub fn sigwait(set: *const sigset_t, sig: *mut ::c_int) -> ::c_int; pub fn pthread_atfork( prepare: ::Option, @@ -2513,6 +2722,29 @@ path: *const ::c_char, mask: u32, ) -> ::c_int; + + pub fn regcomp( + preg: *mut ::regex_t, + pattern: *const ::c_char, + cflags: ::c_int, + ) -> ::c_int; + + pub fn regexec( + preg: *const ::regex_t, + input: *const ::c_char, + nmatch: ::size_t, + pmatch: *mut regmatch_t, + eflags: ::c_int, + ) -> ::c_int; + + pub fn regerror( + errcode: ::c_int, + preg: *const ::regex_t, + errbuf: *mut ::c_char, + errbuf_size: ::size_t, + ) -> ::size_t; + + pub fn regfree(preg: *mut ::regex_t); } cfg_if! { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -868,6 +868,8 @@ pub const SYS_pkey_alloc: ::c_long = 395; pub const SYS_pkey_free: ::c_long = 396; pub const SYS_statx: ::c_long = 397; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; cfg_if! { if #[cfg(libc_align)] { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -527,6 +527,8 @@ pub const SYS_pkey_alloc: ::c_long = 4000 + 364; pub const SYS_pkey_free: ::c_long = 4000 + 365; pub const SYS_statx: ::c_long = 4000 + 366; +pub const SYS_pidfd_open: ::c_long = 4000 + 434; +pub const SYS_clone3: ::c_long = 4000 + 435; pub const O_DIRECT: ::c_int = 0x8000; pub const O_DIRECTORY: ::c_int = 0x10000; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -115,6 +115,12 @@ pub mem_unit: ::c_uint, pub _f: [::c_char; 8], } + + pub struct ip_mreqn { + pub imr_multiaddr: ::in_addr, + pub imr_address: ::in_addr, + pub imr_ifindex: ::c_int, + } } pub const O_NOATIME: ::c_int = 0o1000000; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs 2020-10-01 21:38:28.000000000 +0000 @@ -872,3 +872,5 @@ pub const SYS_pwritev2: ::c_long = 381; pub const SYS_kexec_file_load: ::c_long = 382; pub const SYS_statx: ::c_long = 383; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs 
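A sketch of the regcomp()/regexec()/regfree() bindings added for bionic above, assuming the libc crate; REG_EXTENDED is the constant defined earlier in this patch.

    use std::ffi::CString;
    use std::{mem, ptr};

    fn main() {
        let pattern = CString::new("^ab+c$").unwrap();
        let subject = CString::new("abbbc").unwrap();
        // regcomp() fills the regex_t in; zeroed storage is enough to start from.
        let mut re: libc::regex_t = unsafe { mem::zeroed() };
        let rc = unsafe { libc::regcomp(&mut re, pattern.as_ptr(), libc::REG_EXTENDED) };
        assert_eq!(rc, 0);
        let rc = unsafe { libc::regexec(&re, subject.as_ptr(), 0, ptr::null_mut(), 0) };
        assert_eq!(rc, 0); // 0 means the subject matched
        unsafe { libc::regfree(&mut re) };
    }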
cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,6 +1,6 @@ //! SPARC-specific definitions for 32-bit linux-like values -pub type c_char = u8; +pub type c_char = i8; pub type wchar_t = i32; s! { @@ -961,6 +961,9 @@ pub const SYS_preadv2: ::c_long = 358; pub const SYS_pwritev2: ::c_long = 359; pub const SYS_statx: ::c_long = 360; +pub const SYS_pidfd_open: ::c_long = 434; +// Reserved in the kernel, but not actually implemented yet +pub const SYS_clone3: ::c_long = 435; #[link(name = "util")] extern "C" { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1103,6 +1103,8 @@ pub const SYS_pkey_alloc: ::c_long = 381; pub const SYS_pkey_free: ::c_long = 382; pub const SYS_statx: ::c_long = 383; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; // offsets in user_regs_structs, from sys/reg.h pub const EBX: ::c_int = 0; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -203,6 +203,12 @@ pub ss_flags: ::c_int, pub ss_size: ::size_t } + + pub struct ip_mreqn { + pub imr_multiaddr: ::in_addr, + pub imr_address: ::in_addr, + pub imr_ifindex: ::c_int, + } } pub const VEOF: usize = 4; @@ -756,6 +762,10 @@ pub const SYS_mount: ::c_long = 40; pub const SYS_pivot_root: ::c_long = 41; pub const SYS_nfsservctl: ::c_long = 42; +pub const SYS_statfs: ::c_long = 43; +pub const SYS_fstatfs: ::c_long = 44; +pub const SYS_truncate: ::c_long = 45; +pub const SYS_ftruncate: ::c_long = 46; pub const SYS_fallocate: ::c_long = 47; pub const SYS_faccessat: ::c_long = 48; pub const SYS_chdir: ::c_long = 49; @@ -983,6 +993,8 @@ pub const SYS_pkey_alloc: ::c_long = 289; pub const SYS_pkey_free: ::c_long = 290; pub const SYS_statx: ::c_long = 291; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; #[link(name = "util")] extern "C" { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -577,6 +577,8 @@ pub const SYS_pkey_alloc: ::c_long = 5000 + 324; pub const SYS_pkey_free: ::c_long = 5000 + 325; pub const SYS_statx: ::c_long = 5000 + 326; +pub const SYS_pidfd_open: ::c_long = 5000 + 434; +pub const SYS_clone3: ::c_long = 5000 + 435; pub const SFD_CLOEXEC: ::c_int = 0x080000; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs --- 
cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -190,6 +190,12 @@ pub ss_flags: ::c_int, pub ss_size: ::size_t } + + pub struct ip_mreqn { + pub imr_multiaddr: ::in_addr, + pub imr_address: ::in_addr, + pub imr_ifindex: ::c_int, + } } pub const POSIX_FADV_DONTNEED: ::c_int = 4; @@ -1037,6 +1043,8 @@ pub const SYS_pwritev2: ::c_long = 381; pub const SYS_kexec_file_load: ::c_long = 382; pub const SYS_statx: ::c_long = 383; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; #[link(name = "util")] extern "C" { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -191,6 +191,12 @@ pub l_len: ::off64_t, pub l_pid: ::pid_t, } + + pub struct ip_mreqn { + pub imr_multiaddr: ::in_addr, + pub imr_address: ::in_addr, + pub imr_ifindex: ::c_int, + } } pub const POSIX_FADV_DONTNEED: ::c_int = 4; @@ -857,3 +863,5 @@ pub const SYS_pkey_alloc: ::c_long = 289; pub const SYS_pkey_free: ::c_long = 290; pub const SYS_statx: ::c_long = 291; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1007,6 +1007,8 @@ pub const SYS_setfsgid: ::c_long = 216; pub const SYS_newfstatat: ::c_long = 293; pub const SYS_statx: ::c_long = 379; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; #[link(name = "util")] extern "C" { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -973,6 +973,9 @@ pub const SYS_preadv2: ::c_long = 358; pub const SYS_pwritev2: ::c_long = 359; pub const SYS_statx: ::c_long = 360; +pub const SYS_pidfd_open: ::c_long = 434; +// Reserved in the kernel, but not actually implemented yet +pub const SYS_clone3: ::c_long = 435; #[link(name = "util")] extern "C" { diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs 2020-10-01 21:38:28.000000000 +0000 @@ -409,8 +409,9 @@ pub const SYS_pkey_alloc: ::c_long = 330; pub const SYS_pkey_free: ::c_long = 331; pub const SYS_statx: ::c_long = 332; +pub const SYS_pidfd_open: ::c_long = 434; +pub const SYS_clone3: ::c_long = 435; -#[link(name = "util")] extern "C" { pub fn 
sysctl( name: *mut ::c_int, diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs 2020-10-01 21:38:28.000000000 +0000 @@ -337,6 +337,8 @@ pub const SYS_pkey_alloc: ::c_long = __X32_SYSCALL_BIT + 330; pub const SYS_pkey_free: ::c_long = __X32_SYSCALL_BIT + 331; pub const SYS_statx: ::c_long = __X32_SYSCALL_BIT + 332; +pub const SYS_pidfd_open: ::c_long = __X32_SYSCALL_BIT + 434; +pub const SYS_clone3: ::c_long = __X32_SYSCALL_BIT + 435; pub const SYS_rt_sigaction: ::c_long = __X32_SYSCALL_BIT + 512; pub const SYS_rt_sigreturn: ::c_long = __X32_SYSCALL_BIT + 513; pub const SYS_ioctl: ::c_long = __X32_SYSCALL_BIT + 514; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -313,6 +313,68 @@ } } +cfg_if! { + if #[cfg(libc_union)] { + // Internal, for casts to access union fields + #[repr(C)] + struct sifields_sigchld { + si_pid: ::pid_t, + si_uid: ::uid_t, + si_status: ::c_int, + si_utime: ::c_long, + si_stime: ::c_long, + } + impl ::Copy for sifields_sigchld {} + impl ::Clone for sifields_sigchld { + fn clone(&self) -> sifields_sigchld { + *self + } + } + + // Internal, for casts to access union fields + #[repr(C)] + union sifields { + _align_pointer: *mut ::c_void, + sigchld: sifields_sigchld, + } + + // Internal, for casts to access union fields. Note that some variants + // of sifields start with a pointer, which makes the alignment of + // sifields vary on 32-bit and 64-bit architectures. + #[repr(C)] + struct siginfo_f { + _siginfo_base: [::c_int; 3], + sifields: sifields, + } + + impl siginfo_t { + unsafe fn sifields(&self) -> &sifields { + &(*(self as *const siginfo_t as *const siginfo_f)).sifields + } + + pub unsafe fn si_pid(&self) -> ::pid_t { + self.sifields().sigchld.si_pid + } + + pub unsafe fn si_uid(&self) -> ::uid_t { + self.sifields().sigchld.si_uid + } + + pub unsafe fn si_status(&self) -> ::c_int { + self.sifields().sigchld.si_status + } + + pub unsafe fn si_utime(&self) -> ::c_long { + self.sifields().sigchld.si_utime + } + + pub unsafe fn si_stime(&self) -> ::c_long { + self.sifields().sigchld.si_stime + } + } + } +} + s_no_extra_traits! 
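A sketch, assuming the libc crate: the accessor methods added above are how SIGCHLD details get read out of a siginfo_t inside an SA_SIGINFO handler; installing the handler with sigaction() is omitted.

    extern "C" fn on_sigchld(
        _signo: libc::c_int,
        info: *mut libc::siginfo_t,
        _ctx: *mut libc::c_void,
    ) {
        unsafe {
            let info = &*info;
            // Only meaningful for SIGCHLD; the caller decides how to read the union.
            let _child_pid = info.si_pid();
            let _exit_status = info.si_status();
        }
    }

    fn main() {
        // Handler installation omitted; see sigaction(2) with SA_SIGINFO.
        let _handler: extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void) =
            on_sigchld;
    }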
{ pub struct utmpx { pub ut_type: ::c_short, @@ -664,32 +726,50 @@ pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5; pub const AFFS_SUPER_MAGIC: ::c_long = 0x0000adff; +pub const AFS_SUPER_MAGIC: ::c_long = 0x5346414f; +pub const AUTOFS_SUPER_MAGIC: ::c_long = 0x0187; +pub const BINDERFS_SUPER_MAGIC: ::c_long = 0x6c6f6f70; +pub const BTRFS_SUPER_MAGIC: ::c_long = 0x9123683e; +pub const CGROUP2_SUPER_MAGIC: ::c_long = 0x63677270; +pub const CGROUP_SUPER_MAGIC: ::c_long = 0x27e0eb; pub const CODA_SUPER_MAGIC: ::c_long = 0x73757245; pub const CRAMFS_MAGIC: ::c_long = 0x28cd3d45; +pub const DEVPTS_SUPER_MAGIC: ::c_long = 0x1cd1; +pub const ECRYPTFS_SUPER_MAGIC: ::c_long = 0xf15f; pub const EFS_SUPER_MAGIC: ::c_long = 0x00414a53; pub const EXT2_SUPER_MAGIC: ::c_long = 0x0000ef53; pub const EXT3_SUPER_MAGIC: ::c_long = 0x0000ef53; pub const EXT4_SUPER_MAGIC: ::c_long = 0x0000ef53; +pub const F2FS_SUPER_MAGIC: ::c_long = 0xf2f52010; +pub const FUTEXFS_SUPER_MAGIC: ::c_long = 0xbad1dea; +pub const HOSTFS_SUPER_MAGIC: ::c_long = 0x00c0ffee; pub const HPFS_SUPER_MAGIC: ::c_long = 0xf995e849; pub const HUGETLBFS_MAGIC: ::c_long = 0x958458f6; pub const ISOFS_SUPER_MAGIC: ::c_long = 0x00009660; pub const JFFS2_SUPER_MAGIC: ::c_long = 0x000072b6; -pub const MINIX_SUPER_MAGIC: ::c_long = 0x0000137f; -pub const MINIX_SUPER_MAGIC2: ::c_long = 0x0000138f; -pub const MINIX2_SUPER_MAGIC: ::c_long = 0x00002468; pub const MINIX2_SUPER_MAGIC2: ::c_long = 0x00002478; +pub const MINIX2_SUPER_MAGIC: ::c_long = 0x00002468; +pub const MINIX3_SUPER_MAGIC: ::c_long = 0x4d5a; +pub const MINIX_SUPER_MAGIC2: ::c_long = 0x0000138f; +pub const MINIX_SUPER_MAGIC: ::c_long = 0x0000137f; pub const MSDOS_SUPER_MAGIC: ::c_long = 0x00004d44; pub const NCP_SUPER_MAGIC: ::c_long = 0x0000564c; pub const NFS_SUPER_MAGIC: ::c_long = 0x00006969; +pub const NILFS_SUPER_MAGIC: ::c_long = 0x3434; +pub const OCFS2_SUPER_MAGIC: ::c_long = 0x7461636f; pub const OPENPROM_SUPER_MAGIC: ::c_long = 0x00009fa1; +pub const OVERLAYFS_SUPER_MAGIC: ::c_long = 0x794c7630; pub const PROC_SUPER_MAGIC: ::c_long = 0x00009fa0; pub const QNX4_SUPER_MAGIC: ::c_long = 0x0000002f; +pub const QNX6_SUPER_MAGIC: ::c_long = 0x68191122; +pub const RDTGROUP_SUPER_MAGIC: ::c_long = 0x7655821; pub const REISERFS_SUPER_MAGIC: ::c_long = 0x52654973; pub const SMB_SUPER_MAGIC: ::c_long = 0x0000517b; pub const TMPFS_MAGIC: ::c_long = 0x01021994; +pub const UDF_SUPER_MAGIC: ::c_long = 0x15013346; pub const USBDEVICE_SUPER_MAGIC: ::c_long = 0x00009fa2; -pub const CGROUP_SUPER_MAGIC: ::c_long = 0x27e0eb; -pub const CGROUP2_SUPER_MAGIC: ::c_long = 0x63677270; +pub const XENFS_SUPER_MAGIC: ::c_long = 0xabba1974; +pub const XFS_SUPER_MAGIC: ::c_long = 0x58465342; pub const CPU_SETSIZE: ::c_int = 0x400; @@ -753,6 +833,10 @@ pub const NDA_LINK_NETNSID: ::c_ushort = 10; pub const NDA_SRC_VNI: ::c_ushort = 11; +// linux/personality.h +pub const UNAME26: ::c_int = 0x0020000; +pub const FDPIC_FUNCPTRS: ::c_int = 0x0080000; + // linux/if_addr.h pub const IFA_FLAGS: ::c_ushort = 8; @@ -1278,7 +1362,6 @@ ) -> ::c_int; } -#[link(name = "util")] extern "C" { pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) 
-> ::c_int; pub fn backtrace(buf: *mut *mut ::c_void, sz: ::c_int) -> ::c_int; @@ -1352,7 +1435,6 @@ ) -> ::c_int; } -#[link(name = "dl")] extern "C" { pub fn dlmopen( lmid: Lmid_t, diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2041,6 +2041,8 @@ pub const SIOCGIFMAP: ::c_ulong = 0x00008970; pub const SIOCSIFMAP: ::c_ulong = 0x00008971; +pub const PTRACE_EVENT_STOP: ::c_int = 128; + pub const IPTOS_TOS_MASK: u8 = 0x1E; pub const IPTOS_PREC_MASK: u8 = 0xE0; @@ -2355,7 +2357,13 @@ // uapi/linux/vm_sockets.h pub const VMADDR_CID_ANY: ::c_uint = 0xFFFFFFFF; pub const VMADDR_CID_HYPERVISOR: ::c_uint = 0; +#[deprecated( + since = "0.2.74", + note = "VMADDR_CID_RESERVED is removed since Linux v5.6 and \ + replaced with VMADDR_CID_LOCAL" +)] pub const VMADDR_CID_RESERVED: ::c_uint = 1; +pub const VMADDR_CID_LOCAL: ::c_uint = 1; pub const VMADDR_CID_HOST: ::c_uint = 2; pub const VMADDR_PORT_ANY: ::c_uint = 0xFFFFFFFF; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs 2020-10-01 21:38:28.000000000 +0000 @@ -222,6 +222,7 @@ pub const ESTALE: ::c_int = 116; pub const ESTRPIPE: ::c_int = 86; pub const ETOOMANYREFS: ::c_int = 109; +pub const ETIMEDOUT: ::c_int = 110; pub const EUCLEAN: ::c_int = 117; pub const EUNATCH: ::c_int = 49; pub const EUSERS: ::c_int = 87; @@ -299,6 +300,8 @@ pub const SIGPROF: ::c_int = 27; pub const SIGPWR: ::c_int = 30; pub const SIGSTKFLT: ::c_int = 16; +pub const SIGSTKSZ: ::size_t = 8192; +pub const MINSIGSTKSZ: ::size_t = 2048; pub const SIGSTOP: ::c_int = 19; pub const SIGSYS: ::c_int = 31; pub const SIGTSTP: ::c_int = 20; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/align.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/align.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/align.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/align.rs 2020-10-01 21:38:28.000000000 +0000 @@ -5,3 +5,21 @@ priv_: [f32; 8] } } + +s!{ + pub struct ucontext_t { + pub uc_flags: ::c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: ::stack_t, + pub uc_sigmask: ::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + // What we want here is a single [u64; 36 + 512], but splitting things + // up allows Debug to be auto-derived. 
+ __regs1: [[u64; 18]; 2], // 36 + __regs2: [[u64; 32]; 16], // 512 + } +} diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -170,7 +170,7 @@ pub const MAP_NONBLOCK: ::c_int = 0x010000; pub const MAP_STACK: ::c_int = 0x020000; pub const MAP_HUGETLB: ::c_int = 0x040000; -pub const MAP_SYNC : ::c_int = 0x080000; +pub const MAP_SYNC: ::c_int = 0x080000; pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; @@ -284,6 +284,10 @@ pub const SYS_mount: ::c_long = 40; pub const SYS_pivot_root: ::c_long = 41; pub const SYS_nfsservctl: ::c_long = 42; +pub const SYS_statfs: ::c_long = 43; +pub const SYS_fstatfs: ::c_long = 44; +pub const SYS_truncate: ::c_long = 45; +pub const SYS_ftruncate: ::c_long = 46; pub const SYS_fallocate: ::c_long = 47; pub const SYS_faccessat: ::c_long = 48; pub const SYS_chdir: ::c_long = 49; diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/linux/musl/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/linux/musl/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -138,6 +138,17 @@ pub rt_window: ::c_ulong, pub rt_irtt: ::c_ushort, } + + pub struct ip_mreqn { + pub imr_multiaddr: ::in_addr, + pub imr_address: ::in_addr, + pub imr_ifindex: ::c_int, + } + + pub struct __exit_status { + pub e_termination: ::c_short, + pub e_exit: ::c_short, + } } s_no_extra_traits! { @@ -157,6 +168,36 @@ pub mem_unit: ::c_uint, pub __reserved: [::c_char; 256], } + + // FIXME: musl added paddings and adjusted + // layout in 1.2.0 but our CI is still 1.1.24. + // So, I'm leaving some fields as comments for now. + // ref. https://github.com/bminor/musl/commit/ + // 1e7f0fcd7ff2096904fd93a2ee6d12a2392be392 + pub struct utmpx { + pub ut_type: ::c_short, + //__ut_pad1: ::c_short, + pub ut_pid: ::pid_t, + pub ut_line: [::c_char; 32], + pub ut_id: [::c_char; 4], + pub ut_user: [::c_char; 32], + pub ut_host: [::c_char; 256], + pub ut_exit: __exit_status, + + //#[cfg(target_endian = "little")] + pub ut_session: ::c_long, + //#[cfg(target_endian = "little")] + //__ut_pad2: ::c_long, + + //#[cfg(not(target_endian = "little"))] + //__ut_pad2: ::c_int, + //#[cfg(not(target_endian = "little"))] + //pub ut_session: ::c_int, + + pub ut_tv: ::timeval, + pub ut_addr_v6: [::c_uint; 4], + __unused: [::c_char; 20], + } } cfg_if! 
{ @@ -225,6 +266,68 @@ self.__reserved.hash(state); } } + + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + //&& self.__ut_pad1 == other.__ut_pad1 + && self.ut_pid == other.ut_pid + && self.ut_line == other.ut_line + && self.ut_id == other.ut_id + && self.ut_user == other.ut_user + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a,b)| a == b) + && self.ut_exit == other.ut_exit + && self.ut_session == other.ut_session + //&& self.__ut_pad2 == other.__ut_pad2 + && self.ut_tv == other.ut_tv + && self.ut_addr_v6 == other.ut_addr_v6 + && self.__unused == other.__unused + } + } + + impl Eq for utmpx {} + + impl ::fmt::Debug for utmpx { + fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { + f.debug_struct("utmpx") + .field("ut_type", &self.ut_type) + //.field("__ut_pad1", &self.__ut_pad1) + .field("ut_pid", &self.ut_pid) + .field("ut_line", &self.ut_line) + .field("ut_id", &self.ut_id) + .field("ut_user", &self.ut_user) + //FIXME: .field("ut_host", &self.ut_host) + .field("ut_exit", &self.ut_exit) + .field("ut_session", &self.ut_session) + //.field("__ut_pad2", &self.__ut_pad2) + .field("ut_tv", &self.ut_tv) + .field("ut_addr_v6", &self.ut_addr_v6) + .field("__unused", &self.__unused) + .finish() + } + } + + impl ::hash::Hash for utmpx { + fn hash<H: ::hash::Hasher>(&self, state: &mut H) { + self.ut_type.hash(state); + //self.__ut_pad1.hash(state); + self.ut_pid.hash(state); + self.ut_line.hash(state); + self.ut_id.hash(state); + self.ut_user.hash(state); + self.ut_host.hash(state); + self.ut_exit.hash(state); + self.ut_session.hash(state); + //self.__ut_pad2.hash(state); + self.ut_tv.hash(state); + self.ut_addr_v6.hash(state); + self.__unused.hash(state); + } + } } } diff -Nru cargo-0.44.1/vendor/libc/src/unix/linux_like/mod.rs cargo-0.47.0/vendor/libc/src/unix/linux_like/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/linux_like/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/linux_like/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -778,6 +778,7 @@ pub const IP_MTU_DISCOVER: ::c_int = 10; pub const IP_RECVTOS: ::c_int = 13; pub const IP_RECVERR: ::c_int = 11; +pub const IP_FREEBIND: ::c_int = 15; pub const IP_ADD_MEMBERSHIP: ::c_int = 35; pub const IP_DROP_MEMBERSHIP: ::c_int = 36; pub const IP_ADD_SOURCE_MEMBERSHIP: ::c_int = 39; @@ -846,6 +847,8 @@ pub const PATH_MAX: ::c_int = 4096; +pub const UIO_MAXIOV: ::c_int = 1024; + pub const FD_SETSIZE: usize = 1024; pub const EPOLLIN: ::c_int = 0x1; @@ -960,6 +963,17 @@ pub const WCONTINUED: ::c_int = 0x00000008; pub const WNOWAIT: ::c_int = 0x01000000; +// Options for personality(2). +pub const ADDR_NO_RANDOMIZE: ::c_int = 0x0040000; +pub const MMAP_PAGE_ZERO: ::c_int = 0x0100000; +pub const ADDR_COMPAT_LAYOUT: ::c_int = 0x0200000; +pub const READ_IMPLIES_EXEC: ::c_int = 0x0400000; +pub const ADDR_LIMIT_32BIT: ::c_int = 0x0800000; +pub const SHORT_INODE: ::c_int = 0x1000000; +pub const WHOLE_SECONDS: ::c_int = 0x2000000; +pub const STICKY_TIMEOUTS: ::c_int = 0x4000000; +pub const ADDR_LIMIT_3GB: ::c_int = 0x8000000; + // Options set using PTRACE_SETOPTIONS.
pub const PTRACE_O_TRACESYSGOOD: ::c_int = 0x00000001; pub const PTRACE_O_TRACEFORK: ::c_int = 0x00000002; @@ -981,8 +995,6 @@ pub const PTRACE_EVENT_VFORK_DONE: ::c_int = 5; pub const PTRACE_EVENT_EXIT: ::c_int = 6; pub const PTRACE_EVENT_SECCOMP: ::c_int = 7; -// PTRACE_EVENT_STOP was added to glibc in 2.26 -// pub const PTRACE_EVENT_STOP: ::c_int = 128; pub const __WNOTHREAD: ::c_int = 0x20000000; pub const __WALL: ::c_int = 0x40000000; @@ -1017,6 +1029,13 @@ pub const SI_LOAD_SHIFT: ::c_uint = 16; +pub const CLD_EXITED: ::c_int = 1; +pub const CLD_KILLED: ::c_int = 2; +pub const CLD_DUMPED: ::c_int = 3; +pub const CLD_TRAPPED: ::c_int = 4; +pub const CLD_STOPPED: ::c_int = 5; +pub const CLD_CONTINUED: ::c_int = 6; + pub const SIGEV_SIGNAL: ::c_int = 0; pub const SIGEV_NONE: ::c_int = 1; pub const SIGEV_THREAD: ::c_int = 2; @@ -1024,6 +1043,11 @@ pub const P_ALL: idtype_t = 0; pub const P_PID: idtype_t = 1; pub const P_PGID: idtype_t = 2; +cfg_if! { + if #[cfg(not(target_os = "emscripten"))] { + pub const P_PIDFD: idtype_t = 3; + } +} pub const UTIME_OMIT: c_long = 1073741822; pub const UTIME_NOW: c_long = 1073741823; @@ -1211,56 +1235,66 @@ *slot = 0; } } +} - pub fn WIFSTOPPED(status: ::c_int) -> bool { +safe_f! { + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { (status & 0xff) == 0x7f } - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { (status >> 8) & 0xff } - pub fn WIFCONTINUED(status: ::c_int) -> bool { + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { status == 0xffff } - pub fn WIFSIGNALED(status: ::c_int) -> bool { + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { ((status & 0x7f) + 1) as i8 >= 2 } - pub fn WTERMSIG(status: ::c_int) -> ::c_int { + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { status & 0x7f } - pub fn WIFEXITED(status: ::c_int) -> bool { + pub {const} fn WIFEXITED(status: ::c_int) -> bool { (status & 0x7f) == 0 } - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { (status >> 8) & 0xff } - pub fn WCOREDUMP(status: ::c_int) -> bool { + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { (status & 0x80) != 0 } - pub fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { + pub {const} fn W_EXITCODE(ret: ::c_int, sig: ::c_int) -> ::c_int { + (ret << 8) | sig + } + + pub {const} fn W_STOPCODE(sig: ::c_int) -> ::c_int { + (sig << 8) | 0x7f + } + + pub {const} fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { (cmd << 8) | (type_ & 0x00ff) } - pub fn IPOPT_COPIED(o: u8) -> u8 { + pub {const} fn IPOPT_COPIED(o: u8) -> u8 { o & IPOPT_COPY } - pub fn IPOPT_CLASS(o: u8) -> u8 { + pub {const} fn IPOPT_CLASS(o: u8) -> u8 { o & IPOPT_CLASS_MASK } - pub fn IPOPT_NUMBER(o: u8) -> u8 { + pub {const} fn IPOPT_NUMBER(o: u8) -> u8 { o & IPOPT_NUMBER_MASK } - pub fn IPTOS_ECN(x: u8) -> u8 { + pub {const} fn IPTOS_ECN(x: u8) -> u8 { x & ::IPTOS_ECN_MASK } } @@ -1278,12 +1312,18 @@ len: ::size_t, vec: *mut ::c_uchar, ) -> ::c_int; + pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn clock_settime( clk_id: ::clockid_t, tp: *const ::timespec, ) -> ::c_int; + pub fn clock_getcpuclockid( + pid: ::pid_t, + clk_id: *mut ::clockid_t, + ) -> ::c_int; + pub fn dirfd(dirp: *mut ::DIR) -> ::c_int; pub fn pthread_getattr_np( @@ -1316,6 +1356,12 @@ len: ::off_t, advise: ::c_int, ) -> ::c_int; + pub fn posix_fadvise64( + fd: ::c_int, + offset: ::off64_t, + len: 
::off64_t, + advise: ::c_int, + ) -> ::c_int; pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int; pub fn utimensat( dirfd: ::c_int, diff -Nru cargo-0.44.1/vendor/libc/src/unix/mod.rs cargo-0.47.0/vendor/libc/src/unix/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -298,6 +298,32 @@ } else if #[cfg(feature = "std")] { // cargo build, don't pull in anything extra as the libstd dep // already pulls in all libs. + } else if #[cfg(all(target_os = "linux", + target_env = "gnu", + feature = "rustc-dep-of-std"))] { + #[link(name = "util", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "rt", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "pthread", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "m", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "dl", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "c", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "gcc_eh", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "gcc", kind = "static-nobundle", + cfg(target_feature = "crt-static"))] + #[link(name = "util", cfg(not(target_feature = "crt-static")))] + #[link(name = "rt", cfg(not(target_feature = "crt-static")))] + #[link(name = "pthread", cfg(not(target_feature = "crt-static")))] + #[link(name = "m", cfg(not(target_feature = "crt-static")))] + #[link(name = "dl", cfg(not(target_feature = "crt-static")))] + #[link(name = "c", cfg(not(target_feature = "crt-static")))] + extern {} } else if #[cfg(target_env = "musl")] { #[cfg_attr(feature = "rustc-dep-of-std", link(name = "c", kind = "static", @@ -419,6 +445,19 @@ mode: *const c_char, file: *mut FILE, ) -> *mut FILE; + pub fn fmemopen( + buf: *mut c_void, + size: size_t, + mode: *const c_char, + ) -> *mut FILE; + pub fn open_memstream( + ptr: *mut *mut c_char, + sizeloc: *mut size_t, + ) -> *mut FILE; + pub fn open_wmemstream( + ptr: *mut *mut wchar_t, + sizeloc: *mut size_t, + ) -> *mut FILE; pub fn fflush(file: *mut FILE) -> c_int; pub fn fclose(file: *mut FILE) -> c_int; pub fn remove(filename: *const c_char) -> c_int; @@ -596,9 +635,13 @@ pub fn getchar_unlocked() -> ::c_int; pub fn putchar_unlocked(c: ::c_int) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr(target_os = "netbsd", link_name = "__socket30")] #[cfg_attr(target_os = "illumos", link_name = "__xnet_socket")] pub fn socket(domain: ::c_int, ty: ::c_int, protocol: ::c_int) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "connect$UNIX2003" @@ -614,6 +657,8 @@ link_name = "listen$UNIX2003" )] pub fn listen(socket: ::c_int, backlog: ::c_int) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "accept$UNIX2003" @@ -623,6 +668,8 @@ address: *mut sockaddr, address_len: *mut socklen_t, ) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "getpeername$UNIX2003" @@ -632,6 +679,8 @@ 
address: *mut sockaddr, address_len: *mut socklen_t, ) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "getsockname$UNIX2003" @@ -659,6 +708,8 @@ protocol: ::c_int, socket_vector: *mut ::c_int, ) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "sendto$UNIX2003" @@ -685,7 +736,10 @@ )] pub fn fchmod(fd: ::c_int, mode: mode_t) -> ::c_int; - #[cfg_attr(target_os = "macos", link_name = "fstat$INODE64")] + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "fstat$INODE64" + )] #[cfg_attr(target_os = "netbsd", link_name = "__fstat50")] #[cfg_attr( all(target_os = "freebsd", any(freebsd11, freebsd10)), @@ -695,7 +749,10 @@ pub fn mkdir(path: *const c_char, mode: mode_t) -> ::c_int; - #[cfg_attr(target_os = "macos", link_name = "stat$INODE64")] + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "stat$INODE64" + )] #[cfg_attr(target_os = "netbsd", link_name = "__stat50")] #[cfg_attr( all(target_os = "freebsd", any(freebsd11, freebsd10)), @@ -738,7 +795,10 @@ #[cfg_attr(target_os = "netbsd", link_name = "__opendir30")] pub fn opendir(dirname: *const c_char) -> *mut ::DIR; - #[cfg_attr(target_os = "macos", link_name = "readdir$INODE64")] + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "readdir$INODE64" + )] #[cfg_attr(target_os = "netbsd", link_name = "__readdir30")] #[cfg_attr( all(target_os = "freebsd", any(freebsd11, freebsd10)), @@ -774,7 +834,10 @@ group: ::gid_t, flags: ::c_int, ) -> ::c_int; - #[cfg_attr(target_os = "macos", link_name = "fstatat$INODE64")] + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "fstatat$INODE64" + )] #[cfg_attr( all(target_os = "freebsd", any(freebsd11, freebsd10)), link_name = "fstatat@FBSD_1.1" @@ -1007,7 +1070,10 @@ ifname: *mut ::c_char, ) -> *mut ::c_char; - #[cfg_attr(target_os = "macos", link_name = "lstat$INODE64")] + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "lstat$INODE64" + )] #[cfg_attr(target_os = "netbsd", link_name = "__lstat50")] #[cfg_attr( all(target_os = "freebsd", any(freebsd11, freebsd10)), @@ -1219,6 +1285,8 @@ pub fn dlclose(handle: *mut ::c_void) -> ::c_int; pub fn dladdr(addr: *const ::c_void, info: *mut Dl_info) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg_attr(target_os = "illumos", link_name = "__xnet_getaddrinfo")] pub fn getaddrinfo( node: *const c_char, @@ -1226,6 +1294,8 @@ hints: *const addrinfo, res: *mut *mut addrinfo, ) -> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] pub fn freeaddrinfo(res: *mut addrinfo); pub fn gai_strerror(errcode: ::c_int) -> *const ::c_char; #[cfg_attr( @@ -1483,7 +1553,8 @@ link_name = "fdopendir$INODE64$UNIX2003")] pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; - #[cfg_attr(target_os = "macos", link_name = "readdir_r$INODE64")] + #[cfg_attr(all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "readdir_r$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__readdir_r30")] #[cfg_attr( all(target_os = "freebsd", any(freebsd11, freebsd10)), diff -Nru cargo-0.44.1/vendor/libc/src/unix/newlib/aarch64/mod.rs 
cargo-0.47.0/vendor/libc/src/unix/newlib/aarch64/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/newlib/aarch64/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/newlib/aarch64/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -30,6 +30,8 @@ } } +pub const AF_INET6: ::c_int = 23; + pub const FIONBIO: ::c_ulong = 1; pub const POLLIN: ::c_short = 0x1; @@ -40,3 +42,11 @@ pub const POLLNVAL: ::c_short = 0x20; pub const SOL_SOCKET: ::c_int = 65535; + +pub const MSG_OOB: ::c_int = 1; +pub const MSG_PEEK: ::c_int = 2; +pub const MSG_DONTWAIT: ::c_int = 4; +pub const MSG_DONTROUTE: ::c_int = 0; +pub const MSG_WAITALL: ::c_int = 0; +pub const MSG_MORE: ::c_int = 0; +pub const MSG_NOSIGNAL: ::c_int = 0; diff -Nru cargo-0.44.1/vendor/libc/src/unix/newlib/arm/mod.rs cargo-0.47.0/vendor/libc/src/unix/newlib/arm/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/newlib/arm/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/newlib/arm/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -32,6 +32,8 @@ } } +pub const AF_INET6: ::c_int = 23; + pub const FIONBIO: ::c_ulong = 1; pub const POLLIN: ::c_short = 0x1; @@ -42,3 +44,11 @@ pub const POLLNVAL: ::c_short = 0x20; pub const SOL_SOCKET: ::c_int = 65535; + +pub const MSG_OOB: ::c_int = 1; +pub const MSG_PEEK: ::c_int = 2; +pub const MSG_DONTWAIT: ::c_int = 4; +pub const MSG_DONTROUTE: ::c_int = 0; +pub const MSG_WAITALL: ::c_int = 0; +pub const MSG_MORE: ::c_int = 0; +pub const MSG_NOSIGNAL: ::c_int = 0; diff -Nru cargo-0.44.1/vendor/libc/src/unix/newlib/mod.rs cargo-0.47.0/vendor/libc/src/unix/newlib/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/newlib/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/newlib/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -33,11 +33,15 @@ pub ai_protocol: ::c_int, pub ai_addrlen: socklen_t, + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg(target_arch = "xtensa")] pub ai_addr: *mut sockaddr, pub ai_canonname: *mut ::c_char, + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] #[cfg(not(target_arch = "xtensa"))] pub ai_addr: *mut sockaddr, @@ -416,7 +420,6 @@ pub const AF_UNSPEC: ::c_int = 0; pub const AF_INET: ::c_int = 2; -pub const AF_INET6: ::c_int = 23; pub const CLOCK_REALTIME: ::clockid_t = 1; pub const CLOCK_MONOTONIC: ::clockid_t = 4; @@ -425,14 +428,6 @@ pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; -pub const MSG_OOB: ::c_int = 1; -pub const MSG_PEEK: ::c_int = 2; -pub const MSG_DONTWAIT: ::c_int = 4; -pub const MSG_DONTROUTE: ::c_int = 0; -pub const MSG_WAITALL: ::c_int = 0; -pub const MSG_MORE: ::c_int = 0; -pub const MSG_NOSIGNAL: ::c_int = 0; - pub const SHUT_RD: ::c_int = 0; pub const SHUT_WR: ::c_int = 1; pub const SHUT_RDWR: ::c_int = 2; @@ -607,6 +602,8 @@ pub fn rand() -> ::c_int; pub fn srand(seed: ::c_uint); + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] pub fn bind(fd: ::c_int, addr: *const sockaddr, len: socklen_t) -> ::c_int; pub fn clock_settime( @@ -623,6 +620,8 @@ ) -> ::c_int; pub fn closesocket(sockfd: ::c_int) -> ::c_int; pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) 
-> ::c_int; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] pub fn recvfrom( fd: ::c_int, buf: *mut ::c_void, @@ -631,6 +630,8 @@ addr: *mut sockaddr, addr_len: *mut socklen_t, ) -> isize; + #[cfg(not(all(libc_cfg_target_vendor, target_arch = "powerpc", + target_vendor = "nintendo")))] pub fn getnameinfo( sa: *const sockaddr, salen: socklen_t, @@ -709,6 +710,9 @@ } else if #[cfg(target_arch = "xtensa")] { mod xtensa; pub use self::xtensa::*; + } else if #[cfg(target_arch = "powerpc")] { + mod powerpc; + pub use self::powerpc::*; } else { // Only tested on ARM so far. Other platforms might have different // definitions for types and constants. diff -Nru cargo-0.44.1/vendor/libc/src/unix/newlib/powerpc/mod.rs cargo-0.47.0/vendor/libc/src/unix/newlib/powerpc/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/newlib/powerpc/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/newlib/powerpc/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,14 @@ +pub type clock_t = ::c_ulong; +pub type c_char = u8; +pub type wchar_t = ::c_int; + +pub type c_long = i32; +pub type c_ulong = u32; + +// the newlib shipped with devkitPPC does not support the following components: +// - sockaddr +// - AF_INET6 +// - FIONBIO +// - POLL* +// - SOL_SOCKET +// - MSG_* diff -Nru cargo-0.44.1/vendor/libc/src/unix/newlib/xtensa/mod.rs cargo-0.47.0/vendor/libc/src/unix/newlib/xtensa/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/newlib/xtensa/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/newlib/xtensa/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -60,6 +60,7 @@ } pub const AF_UNIX: ::c_int = 1; +pub const AF_INET6: ::c_int = 10; pub const FIONBIO: ::c_ulong = 2147772030; @@ -75,6 +76,14 @@ pub const SOL_SOCKET: ::c_int = 0xfff; +pub const MSG_OOB: ::c_int = 0x04; +pub const MSG_PEEK: ::c_int = 0x01; +pub const MSG_DONTWAIT: ::c_int = 0x08; +pub const MSG_DONTROUTE: ::c_int = 0x4; +pub const MSG_WAITALL: ::c_int = 0x02; +pub const MSG_MORE: ::c_int = 0x10; +pub const MSG_NOSIGNAL: ::c_int = 0x20; + extern "C" { pub fn sendmsg( s: ::c_int, diff -Nru cargo-0.44.1/vendor/libc/src/unix/redox/mod.rs cargo-0.47.0/vendor/libc/src/unix/redox/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/redox/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/redox/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -829,38 +829,6 @@ // wait.h f! { - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WIFCONTINUED(status: ::c_int) -> bool { - status == 0xffff - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - status & 0x7f - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0x7f) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WCOREDUMP(status: ::c_int) -> bool { - (status & 0x80) != 0 - } - pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { let fd = fd as usize; let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8; @@ -888,6 +856,40 @@ } } +safe_f! 
{ + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { + status == 0xffff + } + + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { + status & 0x7f + } + + pub {const} fn WIFEXITED(status: ::c_int) -> bool { + (status & 0x7f) == 0 + } + + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { + (status & 0x80) != 0 + } +} + extern "C" { // errno.h pub fn __errno_location() -> *mut ::c_int; diff -Nru cargo-0.44.1/vendor/libc/src/unix/solarish/compat.rs cargo-0.47.0/vendor/libc/src/unix/solarish/compat.rs --- cargo-0.44.1/vendor/libc/src/unix/solarish/compat.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/solarish/compat.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,6 +3,9 @@ use unix::solarish::*; +const PTEM: &[u8] = b"ptem\0"; +const LDTERM: &[u8] = b"ldterm\0"; + pub unsafe fn cfmakeraw(termios: *mut ::termios) { (*termios).c_iflag &= !(IMAXBEL | IGNBRK @@ -45,3 +48,136 @@ ::cfsetospeed(termios, speed); 0 } + +unsafe fn bail(fdm: ::c_int, fds: ::c_int) -> ::c_int { + let e = *___errno(); + if fds >= 0 { + ::close(fds); + } + if fdm >= 0 { + ::close(fdm); + } + *___errno() = e; + return -1; +} + +pub unsafe fn openpty( + amain: *mut ::c_int, + asubord: *mut ::c_int, + name: *mut ::c_char, + termp: *const termios, + winp: *const ::winsize, +) -> ::c_int { + // Open the main pseudo-terminal device, making sure not to set it as the + // controlling terminal for this process: + let fdm = ::posix_openpt(O_RDWR | O_NOCTTY); + if fdm < 0 { + return -1; + } + + // Set permissions and ownership on the subordinate device and unlock it: + if ::grantpt(fdm) < 0 || ::unlockpt(fdm) < 0 { + return bail(fdm, -1); + } + + // Get the path name of the subordinate device: + let subordpath = ::ptsname(fdm); + if subordpath.is_null() { + return bail(fdm, -1); + } + + // Open the subordinate device without setting it as the controlling + // terminal for this process: + let fds = ::open(subordpath, O_RDWR | O_NOCTTY); + if fds < 0 { + return bail(fdm, -1); + } + + // Check if the STREAMS modules are already pushed: + let setup = ::ioctl(fds, I_FIND, LDTERM.as_ptr()); + if setup < 0 { + return bail(fdm, fds); + } else if setup == 0 { + // The line discipline is not present, so push the appropriate STREAMS + // modules for the subordinate device: + if ::ioctl(fds, I_PUSH, PTEM.as_ptr()) < 0 + || ::ioctl(fds, I_PUSH, LDTERM.as_ptr()) < 0 + { + return bail(fdm, fds); + } + } + + // If provided, set the terminal parameters: + if !termp.is_null() && ::tcsetattr(fds, TCSAFLUSH, termp) != 0 { + return bail(fdm, fds); + } + + // If provided, set the window size: + if !winp.is_null() && ::ioctl(fds, TIOCSWINSZ, winp) < 0 { + return bail(fdm, fds); + } + + // If the caller wants the name of the subordinate device, copy it out. + // + // Note that this is a terrible interface: there appears to be no standard + // upper bound on the copy length for this pointer. Nobody should pass + // anything but NULL here, preferring instead to use ptsname(3C) directly. 
+ if !name.is_null() { + ::strcpy(name, subordpath); + } + + *amain = fdm; + *asubord = fds; + 0 +} + +pub unsafe fn forkpty( + amain: *mut ::c_int, + name: *mut ::c_char, + termp: *const termios, + winp: *const ::winsize, +) -> ::pid_t { + let mut fds = -1; + + if openpty(amain, &mut fds, name, termp, winp) != 0 { + return -1; + } + + let pid = ::fork(); + if pid < 0 { + return bail(*amain, fds); + } else if pid > 0 { + // In the parent process, we close the subordinate device and return the + // process ID of the new child: + ::close(fds); + return pid; + } + + // The rest of this function executes in the child process. + + // Close the main side of the pseudo-terminal pair: + ::close(*amain); + + // Use TIOCSCTTY to set the subordinate device as our controlling + // terminal. This will fail (with ENOTTY) if we are not the leader in + // our own session, so we call setsid() first. Finally, arrange for + // the pseudo-terminal to occupy the standard I/O descriptors. + if ::setsid() < 0 + || ::ioctl(fds, TIOCSCTTY, 0) < 0 + || ::dup2(fds, 0) < 0 + || ::dup2(fds, 1) < 0 + || ::dup2(fds, 2) < 0 + { + // At this stage there are no particularly good ways to handle failure. + // Exit as abruptly as possible, using _exit() to avoid messing with any + // state still shared with the parent process. + ::_exit(EXIT_FAILURE); + } + // Close the inherited descriptor, taking care to avoid closing the standard + // descriptors by mistake: + if fds > 2 { + ::close(fds); + } + + 0 +} diff -Nru cargo-0.44.1/vendor/libc/src/unix/solarish/illumos.rs cargo-0.47.0/vendor/libc/src/unix/solarish/illumos.rs --- cargo-0.44.1/vendor/libc/src/unix/solarish/illumos.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/solarish/illumos.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,7 +18,13 @@ pub const AF_LOCAL: ::c_int = 1; // AF_UNIX pub const AF_FILE: ::c_int = 1; // AF_UNIX +pub const EFD_SEMAPHORE: ::c_int = 0x1; +pub const EFD_NONBLOCK: ::c_int = 0x800; +pub const EFD_CLOEXEC: ::c_int = 0x80000; + extern "C" { + pub fn eventfd(init: ::c_uint, flags: ::c_int) -> ::c_int; + pub fn mincore( addr: ::caddr_t, len: ::size_t, diff -Nru cargo-0.44.1/vendor/libc/src/unix/solarish/mod.rs cargo-0.47.0/vendor/libc/src/unix/solarish/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/solarish/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/solarish/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -22,6 +22,8 @@ pub type time_t = ::c_long; pub type wchar_t = ::c_int; pub type nfds_t = ::c_ulong; +pub type projid_t = ::c_int; +pub type zoneid_t = ::c_int; pub type suseconds_t = ::c_long; pub type off_t = ::c_long; @@ -46,6 +48,15 @@ } } +#[cfg_attr(feature = "extra_traits", derive(Debug))] +pub enum ucred_t {} +impl ::Copy for ucred_t {} +impl ::Clone for ucred_t { + fn clone(&self) -> ucred_t { + *self + } +} + s! 
{ pub struct in_addr { pub s_addr: ::in_addr_t, @@ -950,6 +961,8 @@ pub const SEEK_SET: ::c_int = 0; pub const SEEK_CUR: ::c_int = 1; pub const SEEK_END: ::c_int = 2; +pub const SEEK_DATA: ::c_int = 3; +pub const SEEK_HOLE: ::c_int = 4; pub const _IOFBF: ::c_int = 0; pub const _IONBF: ::c_int = 4; pub const _IOLBF: ::c_int = 64; @@ -1102,7 +1115,6 @@ pub const MS_SYNC: ::c_int = 0x0004; pub const MS_ASYNC: ::c_int = 0x0001; pub const MS_INVALIDATE: ::c_int = 0x0002; -pub const MS_INVALCURPROC: ::c_int = 0x0008; pub const EPERM: ::c_int = 1; pub const ENOENT: ::c_int = 2; @@ -1357,7 +1369,6 @@ pub const AF_INET_OFFLOAD: ::c_int = 30; pub const AF_TRILL: ::c_int = 31; pub const AF_PACKET: ::c_int = 32; -pub const AF_LX_NETLINK: ::c_int = 33; pub const SOCK_DGRAM: ::c_int = 1; pub const SOCK_STREAM: ::c_int = 2; @@ -1782,8 +1793,6 @@ pub const TIOCSWINSZ: ::c_int = _TIOC | 103; pub const TIOCGSOFTCAR: ::c_int = _TIOC | 105; pub const TIOCSSOFTCAR: ::c_int = _TIOC | 106; -pub const TIOCSETLD: ::c_int = _TIOC | 123; -pub const TIOCGETLD: ::c_int = _TIOC | 124; pub const TIOCGPPS: ::c_int = _TIOC | 125; pub const TIOCSPPS: ::c_int = _TIOC | 126; pub const TIOCGPPSEV: ::c_int = _TIOC | 127; @@ -1839,6 +1848,8 @@ pub const EPOLLET: ::c_int = 0x80000000; pub const EPOLLRDHUP: ::c_int = 0x2000; pub const EPOLLONESHOT: ::c_int = 0x40000000; +pub const EPOLLWAKEUP: ::c_int = 0x20000000; +pub const EPOLLEXCLUSIVE: ::c_int = 0x10000000; pub const EPOLL_CLOEXEC: ::c_int = 0x80000; pub const EPOLL_CTL_ADD: ::c_int = 1; pub const EPOLL_CTL_MOD: ::c_int = 3; @@ -1938,6 +1949,44 @@ pub const VSTATUS: usize = 16; pub const VERASE2: usize = 17; +// +const STR: ::c_int = (b'S' as ::c_int) << 8; +pub const I_NREAD: ::c_int = STR | 0o1; +pub const I_PUSH: ::c_int = STR | 0o2; +pub const I_POP: ::c_int = STR | 0o3; +pub const I_LOOK: ::c_int = STR | 0o4; +pub const I_FLUSH: ::c_int = STR | 0o5; +pub const I_SRDOPT: ::c_int = STR | 0o6; +pub const I_GRDOPT: ::c_int = STR | 0o7; +pub const I_STR: ::c_int = STR | 0o10; +pub const I_SETSIG: ::c_int = STR | 0o11; +pub const I_GETSIG: ::c_int = STR | 0o12; +pub const I_FIND: ::c_int = STR | 0o13; +pub const I_LINK: ::c_int = STR | 0o14; +pub const I_UNLINK: ::c_int = STR | 0o15; +pub const I_PEEK: ::c_int = STR | 0o17; +pub const I_FDINSERT: ::c_int = STR | 0o20; +pub const I_SENDFD: ::c_int = STR | 0o21; +pub const I_RECVFD: ::c_int = STR | 0o16; +pub const I_SWROPT: ::c_int = STR | 0o23; +pub const I_GWROPT: ::c_int = STR | 0o24; +pub const I_LIST: ::c_int = STR | 0o25; +pub const I_PLINK: ::c_int = STR | 0o26; +pub const I_PUNLINK: ::c_int = STR | 0o27; +pub const I_ANCHOR: ::c_int = STR | 0o30; +pub const I_FLUSHBAND: ::c_int = STR | 0o34; +pub const I_CKBAND: ::c_int = STR | 0o35; +pub const I_GETBAND: ::c_int = STR | 0o36; +pub const I_ATMARK: ::c_int = STR | 0o37; +pub const I_SETCLTIME: ::c_int = STR | 0o40; +pub const I_GETCLTIME: ::c_int = STR | 0o41; +pub const I_CANPUT: ::c_int = STR | 0o42; +pub const I_SERROPT: ::c_int = STR | 0o43; +pub const I_GERROPT: ::c_int = STR | 0o44; +pub const I_ESETSIG: ::c_int = STR | 0o45; +pub const I_EGETSIG: ::c_int = STR | 0o46; +pub const __I_PUSH_NOCTTY: ::c_int = STR | 0o47; + // 3SOCKET flags pub const SOCK_CLOEXEC: ::c_int = 0x080000; pub const SOCK_NONBLOCK: ::c_int = 0x100000; @@ -1999,7 +2048,63 @@ pub const PRIO_PGRP: ::c_int = 1; pub const PRIO_USER: ::c_int = 2; +// As per sys/socket.h, header alignment must be 8 bytes on SPARC +// and 4 bytes everywhere else: +#[cfg(target_arch = "sparc64")] +const 
_CMSG_HDR_ALIGNMENT: usize = 8; +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +const _CMSG_HDR_ALIGNMENT: usize = 4; + +const _CMSG_DATA_ALIGNMENT: usize = ::mem::size_of::<::c_int>(); + +fn _CMSG_HDR_ALIGN(p: usize) -> usize { + (p + _CMSG_HDR_ALIGNMENT - 1) & !(_CMSG_HDR_ALIGNMENT - 1) +} + +fn _CMSG_DATA_ALIGN(p: usize) -> usize { + (p + _CMSG_DATA_ALIGNMENT - 1) & !(_CMSG_DATA_ALIGNMENT - 1) +} + f! { + pub fn CMSG_DATA(cmsg: *const ::cmsghdr) -> *mut ::c_uchar { + _CMSG_DATA_ALIGN(cmsg.offset(1) as usize) as *mut ::c_uchar + } + + pub fn CMSG_LEN(length: ::c_uint) -> ::c_uint { + _CMSG_DATA_ALIGN(::mem::size_of::<::cmsghdr>()) as ::c_uint + length + } + + pub fn CMSG_FIRSTHDR(mhdr: *const ::msghdr) -> *mut ::cmsghdr { + if ((*mhdr).msg_controllen as usize) < ::mem::size_of::<::cmsghdr>() { + 0 as *mut ::cmsghdr + } else { + (*mhdr).msg_control as *mut ::cmsghdr + } + } + + pub fn CMSG_NXTHDR(mhdr: *const ::msghdr, cmsg: *const ::cmsghdr) + -> *mut ::cmsghdr + { + if cmsg.is_null() { + return ::CMSG_FIRSTHDR(mhdr); + }; + let next = _CMSG_HDR_ALIGN(cmsg as usize + (*cmsg).cmsg_len as usize + + ::mem::size_of::<::cmsghdr>()); + let max = (*mhdr).msg_control as usize + + (*mhdr).msg_controllen as usize; + if next > max { + 0 as *mut ::cmsghdr + } else { + _CMSG_HDR_ALIGN(cmsg as usize + (*cmsg).cmsg_len as usize) + as *mut ::cmsghdr + } + } + + pub fn CMSG_SPACE(length: ::c_uint) -> ::c_uint { + _CMSG_HDR_ALIGN(::mem::size_of::<::cmsghdr>() as usize + + length as usize) as ::c_uint + } + pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { let bits = ::mem::size_of_val(&(*set).fds_bits[0]) * 8; let fd = fd as usize; @@ -2025,36 +2130,38 @@ *slot = 0; } } +} - pub fn WIFEXITED(status: ::c_int) -> bool { +safe_f! { + pub {const} fn WIFEXITED(status: ::c_int) -> bool { (status & 0xFF) == 0 } - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { (status >> 8) & 0xFF } - pub fn WTERMSIG(status: ::c_int) -> ::c_int { + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { status & 0x7F } - pub fn WIFCONTINUED(status: ::c_int) -> bool { + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { (status & 0xffff) == 0xffff } - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { (status & 0xff00) >> 8 } - pub fn WIFSIGNALED(status: ::c_int) -> bool { + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { ((status & 0xff) > 0) && (status & 0xff00 == 0) } - pub fn WIFSTOPPED(status: ::c_int) -> bool { + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { ((status & 0xff) == 0x7f) && ((status & 0xff00) != 0) } - pub fn WCOREDUMP(status: ::c_int) -> bool { + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { (status & 0x80) != 0 } } @@ -2161,6 +2268,15 @@ f: extern "C" fn(*mut ::c_void) -> *mut ::c_void, value: *mut ::c_void, ) -> ::c_int; + pub fn pthread_getattr_np( + thread: ::pthread_t, + attr: *mut ::pthread_attr_t, + ) -> ::c_int; + pub fn pthread_attr_getstack( + attr: *const ::pthread_attr_t, + stackaddr: *mut *mut ::c_void, + stacksize: *mut ::size_t, + ) -> ::c_int; pub fn pthread_condattr_getclock( attr: *const pthread_condattr_t, clock_id: *mut clockid_t, @@ -2301,7 +2417,7 @@ fd: ::c_int, address: *mut sockaddr, address_len: *mut socklen_t, - flags: ::c_int + flags: ::c_int, ) -> ::c_int; pub fn mq_open(name: *const ::c_char, oflag: ::c_int, ...) 
-> ::mqd_t; @@ -2526,6 +2642,29 @@ pub fn ntp_adjtime(buf: *mut timex) -> ::c_int; pub fn ntp_gettime(buf: *mut ntptimeval) -> ::c_int; + + pub fn ucred_get(pid: ::pid_t) -> *mut ucred_t; + pub fn getpeerucred(fd: ::c_int, ucred: *mut *mut ucred_t) -> ::c_int; + + pub fn ucred_free(ucred: *mut ucred_t); + + pub fn ucred_geteuid(ucred: *const ucred_t) -> ::uid_t; + pub fn ucred_getruid(ucred: *const ucred_t) -> ::uid_t; + pub fn ucred_getsuid(ucred: *const ucred_t) -> ::uid_t; + pub fn ucred_getegid(ucred: *const ucred_t) -> ::gid_t; + pub fn ucred_getrgid(ucred: *const ucred_t) -> ::gid_t; + pub fn ucred_getsgid(ucred: *const ucred_t) -> ::gid_t; + pub fn ucred_getgroups( + ucred: *const ucred_t, + groups: *mut *const ::gid_t, + ) -> ::c_int; + pub fn ucred_getpid(ucred: *const ucred_t) -> ::pid_t; + pub fn ucred_getprojid(ucred: *const ucred_t) -> projid_t; + pub fn ucred_getzoneid(ucred: *const ucred_t) -> zoneid_t; + pub fn ucred_getpflags(ucred: *const ucred_t, flags: ::c_uint) + -> ::c_uint; + + pub fn ucred_size() -> ::size_t; } mod compat; diff -Nru cargo-0.44.1/vendor/libc/src/unix/solarish/solaris.rs cargo-0.47.0/vendor/libc/src/unix/solarish/solaris.rs --- cargo-0.44.1/vendor/libc/src/unix/solarish/solaris.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/solarish/solaris.rs 2020-10-01 21:38:28.000000000 +0000 @@ -29,8 +29,6 @@ pub const PORT_SOURCE_POSTWAIT: ::c_int = 8; pub const PORT_SOURCE_SIGNAL: ::c_int = 9; -pub const EPOLLEXCLUSIVE: ::c_int = 0x10000000; - pub const AF_LOCAL: ::c_int = 0; pub const AF_FILE: ::c_int = 0; diff -Nru cargo-0.44.1/vendor/libc/src/unix/uclibc/mod.rs cargo-0.47.0/vendor/libc/src/unix/uclibc/mod.rs --- cargo-0.44.1/vendor/libc/src/unix/uclibc/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/unix/uclibc/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -870,6 +870,7 @@ pub const MSG_CMSG_CLOEXEC: ::c_int = 0x40000000; pub const SOCK_RAW: ::c_int = 3; +pub const SOCK_RDM: ::c_int = 4; pub const IP_MULTICAST_TTL: ::c_int = 33; pub const IP_MULTICAST_LOOP: ::c_int = 34; pub const IP_TTL: ::c_int = 2; @@ -931,6 +932,8 @@ pub const PATH_MAX: ::c_int = 4096; +pub const UIO_MAXIOV: ::c_int = 1024; + pub const FD_SETSIZE: usize = 1024; pub const EPOLLIN: ::c_int = 0x1; @@ -1534,38 +1537,6 @@ } } - pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WIFCONTINUED(status: ::c_int) -> bool { - status == 0xffff - } - - pub fn WIFSIGNALED(status: ::c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub fn WTERMSIG(status: ::c_int) -> ::c_int { - status & 0x7f - } - - pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0x7f) == 0 - } - - pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - (status >> 8) & 0xff - } - - pub fn WCOREDUMP(status: ::c_int) -> bool { - (status & 0x80) != 0 - } - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { for slot in cpuset.bits.iter_mut() { *slot = 0; @@ -1597,8 +1568,42 @@ pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { set1.bits == set2.bits } +} + +safe_f! 
{ + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub {const} fn WIFCONTINUED(status: ::c_int) -> bool { + status == 0xffff + } + + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { + status & 0x7f + } + + pub {const} fn WIFEXITED(status: ::c_int) -> bool { + (status & 0x7f) == 0 + } + + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub {const} fn WCOREDUMP(status: ::c_int) -> bool { + (status & 0x80) != 0 + } - pub fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { + pub {const} fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { (cmd << 8) | (type_ & 0x00ff) } } diff -Nru cargo-0.44.1/vendor/libc/src/vxworks/mod.rs cargo-0.47.0/vendor/libc/src/vxworks/mod.rs --- cargo-0.44.1/vendor/libc/src/vxworks/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/vxworks/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -112,6 +112,24 @@ } } +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut ::c_void { + self.si_addr + } + + pub unsafe fn si_value(&self) -> ::sigval { + self.si_value + } + + pub unsafe fn si_pid(&self) -> ::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> ::uid_t { + self.si_uid + } +} + s! { // b_pthread_condattr_t.h pub struct pthread_condattr_t { @@ -2024,23 +2042,25 @@ //Dummy functions, these don't really exist in VxWorks. // wait.h macros -pub fn WIFEXITED(status: ::c_int) -> bool { - (status & 0xFF00) == 0 -} -pub fn WIFSIGNALED(status: ::c_int) -> bool { - (status & 0xFF00) != 0 -} -pub fn WIFSTOPPED(status: ::c_int) -> bool { - (status & 0xFF0000) != 0 -} -pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { - status & 0xFF -} -pub fn WTERMSIG(status: ::c_int) -> ::c_int { - (status >> 8) & 0xFF -} -pub fn WSTOPSIG(status: ::c_int) -> ::c_int { - (status >> 16) & 0xFF +safe_f! 
{ + pub {const} fn WIFEXITED(status: ::c_int) -> bool { + (status & 0xFF00) == 0 + } + pub {const} fn WIFSIGNALED(status: ::c_int) -> bool { + (status & 0xFF00) != 0 + } + pub {const} fn WIFSTOPPED(status: ::c_int) -> bool { + (status & 0xFF0000) != 0 + } + pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int { + status & 0xFF + } + pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int { + (status >> 8) & 0xFF + } + pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int { + (status >> 16) & 0xFF + } } pub fn pread( diff -Nru cargo-0.44.1/vendor/libc/src/wasi.rs cargo-0.47.0/vendor/libc/src/wasi.rs --- cargo-0.44.1/vendor/libc/src/wasi.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/wasi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -180,6 +180,11 @@ pub const _IOFBF: c_int = 0; pub const _IONBF: c_int = 2; pub const _IOLBF: c_int = 1; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const FD_CLOEXEC: c_int = 1; pub const FD_SETSIZE: size_t = 1024; pub const O_APPEND: c_int = 0x0001; pub const O_DSYNC: c_int = 0x0002; @@ -197,6 +202,7 @@ pub const O_WRONLY: c_int = 0x10000000; pub const O_RDWR: c_int = O_WRONLY | O_RDONLY; pub const O_ACCMODE: c_int = O_EXEC | O_RDWR | O_SEARCH; +pub const O_NOCTTY: c_int = 0x0; pub const POSIX_FADV_DONTNEED: c_int = 4; pub const POSIX_FADV_NOREUSE: c_int = 5; pub const POSIX_FADV_NORMAL: c_int = 0; @@ -209,6 +215,33 @@ pub const AT_REMOVEDIR: c_int = 0x4; pub const UTIME_OMIT: c_long = 0xfffffffe; pub const UTIME_NOW: c_long = 0xffffffff; +pub const S_IFIFO: mode_t = 49152; +pub const S_IFCHR: mode_t = 8192; +pub const S_IFBLK: mode_t = 24576; +pub const S_IFDIR: mode_t = 16384; +pub const S_IFREG: mode_t = 32768; +pub const S_IFLNK: mode_t = 40960; +pub const S_IFSOCK: mode_t = 49152; +pub const S_IFMT: mode_t = 57344; +pub const DT_UNKNOWN: u8 = 0; +pub const DT_BLK: u8 = 1; +pub const DT_CHR: u8 = 2; +pub const DT_DIR: u8 = 3; +pub const DT_REG: u8 = 4; +pub const DT_LNK: u8 = 7; +pub const FIONREAD: c_int = 1; +pub const FIONBIO: c_int = 2; +pub const F_OK: ::c_int = 0; +pub const R_OK: ::c_int = 4; +pub const W_OK: ::c_int = 2; +pub const X_OK: ::c_int = 1; +pub const POLLIN: ::c_short = 0x1; +pub const POLLOUT: ::c_short = 0x2; +pub const POLLERR: ::c_short = 0x1000; +pub const POLLHUP: ::c_short = 0x2000; +pub const POLLNVAL: ::c_short = 0x4000; +pub const POLLRDNORM: ::c_short = 0x1; +pub const POLLWRNORM: ::c_short = 0x2; pub const E2BIG: c_int = 1; pub const EACCES: c_int = 2; @@ -289,6 +322,11 @@ pub const EOPNOTSUPP: c_int = ENOTSUP; pub const EWOULDBLOCK: c_int = EAGAIN; +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; +pub const _SC_IOV_MAX: c_int = 60; +pub const _SC_SYMLOOP_MAX: c_int = 173; + #[cfg_attr( feature = "rustc-dep-of-std", link(name = "c", kind = "static", cfg(target_feature = "crt-static")) @@ -378,6 +416,7 @@ ) -> size_t; pub fn gmtime(a: *const time_t) -> *mut tm; pub fn gmtime_r(a: *const time_t, b: *mut tm) -> *mut tm; + pub fn localtime(a: *const time_t) -> *mut tm; pub fn localtime_r(a: *const time_t, b: *mut tm) -> *mut tm; pub fn asctime_r(a: *const tm, b: *mut c_char) -> *mut c_char; pub fn ctime_r(a: *const time_t, b: *mut c_char) -> *mut c_char; @@ -403,6 +442,7 @@ pub fn isspace(c: c_int) -> c_int; pub fn isupper(c: c_int) -> c_int; pub fn isxdigit(c: c_int) -> c_int; + pub fn isblank(c: c_int) -> c_int; pub fn tolower(c: c_int) -> c_int; pub fn toupper(c: c_int) -> c_int; pub fn 
setvbuf( @@ -448,6 +488,7 @@ pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; pub fn strdup(cs: *const c_char) -> *mut c_char; + pub fn strndup(cs: *const c_char, n: size_t) -> *mut c_char; pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; @@ -515,6 +556,8 @@ pub fn closedir(dirp: *mut ::DIR) -> ::c_int; pub fn rewinddir(dirp: *mut ::DIR); pub fn dirfd(dirp: *mut ::DIR) -> ::c_int; + pub fn seekdir(dirp: *mut ::DIR, loc: ::c_long); + pub fn telldir(dirp: *mut ::DIR) -> ::c_long; pub fn openat( dirfd: ::c_int, @@ -575,7 +618,6 @@ pub fn link(src: *const c_char, dst: *const c_char) -> ::c_int; pub fn lseek(fd: ::c_int, offset: off_t, whence: ::c_int) -> off_t; pub fn pathconf(path: *const c_char, name: ::c_int) -> c_long; - pub fn pause() -> ::c_int; pub fn rmdir(path: *const c_char) -> ::c_int; pub fn sleep(secs: ::c_uint) -> ::c_uint; pub fn unlink(c: *const c_char) -> ::c_int; @@ -643,6 +685,8 @@ pub fn sysconf(name: ::c_int) -> ::c_long; + pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; + pub fn fseeko( stream: *mut ::FILE, offset: ::off_t, diff -Nru cargo-0.44.1/vendor/libc/src/windows/mod.rs cargo-0.47.0/vendor/libc/src/windows/mod.rs --- cargo-0.44.1/vendor/libc/src/windows/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libc/src/windows/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -444,6 +444,35 @@ pub fn dup(fd: ::c_int) -> ::c_int; #[link_name = "_dup2"] pub fn dup2(src: ::c_int, dst: ::c_int) -> ::c_int; + #[link_name = "_execl"] + pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexecl"] + pub fn wexecl(path: *const wchar_t, arg0: *const wchar_t, ...) + -> intptr_t; + #[link_name = "_execle"] + pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexecle"] + pub fn wexecle( + path: *const wchar_t, + arg0: *const wchar_t, + ... + ) -> intptr_t; + #[link_name = "_execlp"] + pub fn execlp(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexeclp"] + pub fn wexeclp( + path: *const wchar_t, + arg0: *const wchar_t, + ... + ) -> intptr_t; + #[link_name = "_execlpe"] + pub fn execlpe(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexeclpe"] + pub fn wexeclpe( + path: *const wchar_t, + arg0: *const wchar_t, + ... 
+ ) -> intptr_t; #[link_name = "_execv"] pub fn execv( prog: *const c_char, @@ -463,6 +492,28 @@ argv: *const *const c_char, envp: *const *const c_char, ) -> ::c_int; + #[link_name = "_wexecv"] + pub fn wexecv( + prog: *const wchar_t, + argv: *const *const wchar_t, + ) -> ::intptr_t; + #[link_name = "_wexecve"] + pub fn wexecve( + prog: *const wchar_t, + argv: *const *const wchar_t, + envp: *const *const wchar_t, + ) -> ::intptr_t; + #[link_name = "_wexecvp"] + pub fn wexecvp( + c: *const wchar_t, + argv: *const *const wchar_t, + ) -> ::intptr_t; + #[link_name = "_wexecvpe"] + pub fn wexecvpe( + c: *const wchar_t, + argv: *const *const wchar_t, + envp: *const *const wchar_t, + ) -> ::intptr_t; #[link_name = "_getcwd"] pub fn getcwd(buf: *mut c_char, size: ::c_int) -> *mut c_char; #[link_name = "_getpid"] @@ -507,6 +558,8 @@ category: ::c_int, locale: *const wchar_t, ) -> *mut wchar_t; + #[link_name = "_aligned_malloc"] + pub fn aligned_malloc(size: size_t, alignment: size_t) -> *mut c_void; } extern "system" { diff -Nru cargo-0.44.1/vendor/libgit2-sys/build.rs cargo-0.47.0/vendor/libgit2-sys/build.rs --- cargo-0.44.1/vendor/libgit2-sys/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libgit2-sys/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,18 +6,22 @@ fn main() { let https = env::var("CARGO_FEATURE_HTTPS").is_ok(); let ssh = env::var("CARGO_FEATURE_SSH").is_ok(); + let zlib_ng_compat = env::var("CARGO_FEATURE_ZLIB_NG_COMPAT").is_ok(); - let mut cfg = pkg_config::Config::new(); - if let Ok(lib) = cfg.atleast_version("1.0.0").probe("libgit2") { - for include in &lib.include_paths { - println!("cargo:root={}", include.display()); + // To use zlib-ng in zlib-compat mode, we have to build libgit2 ourselves. + if !zlib_ng_compat { + let mut cfg = pkg_config::Config::new(); + if let Ok(lib) = cfg.probe("libgit2") { + for include in &lib.include_paths { + println!("cargo:root={}", include.display()); + } + return; } - return; } if false { let _ = Command::new("git") - .args(&["submodule", "update", "--init"]) + .args(&["submodule", "update", "--init", "libgit2"]) .status(); } diff -Nru cargo-0.44.1/vendor/libgit2-sys/.cargo-checksum.json cargo-0.47.0/vendor/libgit2-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/libgit2-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libgit2-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"bf81b43f9b45ab07897a780c9b7b26b1504497e469c7a78162fc29e3b8b1c1b3"} \ No newline at end of file +{"files":{},"package":"069eea34f76ec15f2822ccf78fe0cdb8c9016764d0a12865278585a74dbdeae5"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/libgit2-sys/Cargo.toml cargo-0.47.0/vendor/libgit2-sys/Cargo.toml --- cargo-0.44.1/vendor/libgit2-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libgit2-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "libgit2-sys" -version = "0.12.6+1.0.0" +version = "0.12.13+1.0.1" authors = ["Josh Triplett ", "Alex Crichton "] build = "build.rs" links = "git2" @@ -29,13 +29,15 @@ version = "0.2" [dependencies.libssh2-sys] -version = "0.2.11" +version = "0.2.19" optional = true [dependencies.libz-sys] -version = "1.0.22" +version = "1.1.0" +features = ["libc"] +default-features = false [build-dependencies.cc] -version = "1.0.42" +version = "1.0.43" features = ["parallel"] [build-dependencies.pkg-config] @@ -45,6 +47,7 @@ https = ["openssl-sys"] ssh = 
["libssh2-sys"] ssh_key_from_memory = [] +zlib-ng-compat = ["libz-sys/zlib-ng", "libssh2-sys/zlib-ng-compat"] [target."cfg(unix)".dependencies.openssl-sys] version = "0.9" optional = true diff -Nru cargo-0.44.1/vendor/libgit2-sys/debian/patches/no-special-snowflake-env.patch cargo-0.47.0/vendor/libgit2-sys/debian/patches/no-special-snowflake-env.patch --- cargo-0.44.1/vendor/libgit2-sys/debian/patches/no-special-snowflake-env.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libgit2-sys/debian/patches/no-special-snowflake-env.patch 2020-10-01 21:38:28.000000000 +0000 @@ -1,11 +1,20 @@ --- a/build.rs +++ b/build.rs -@@ -15,7 +15,7 @@ - return; +@@ -11,7 +11,7 @@ + // To use zlib-ng in zlib-compat mode, we have to build libgit2 ourselves. + if !zlib_ng_compat { + let mut cfg = pkg_config::Config::new(); +- if let Ok(lib) = cfg.atleast_version("1.0.0").probe("libgit2") { ++ if let Ok(lib) = cfg.probe("libgit2") { + for include in &lib.include_paths { + println!("cargo:root={}", include.display()); + } +@@ -19,7 +19,7 @@ + } } - if !Path::new("libgit2/.git").exists() { + if false { let _ = Command::new("git") - .args(&["submodule", "update", "--init"]) + .args(&["submodule", "update", "--init", "libgit2"]) .status(); diff -Nru cargo-0.44.1/vendor/libgit2-sys/lib.rs cargo-0.47.0/vendor/libgit2-sys/lib.rs --- cargo-0.44.1/vendor/libgit2-sys/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libgit2-sys/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -24,6 +24,7 @@ pub const GIT_REFDB_BACKEND_VERSION: c_uint = 1; pub const GIT_CHERRYPICK_OPTIONS_VERSION: c_uint = 1; pub const GIT_APPLY_OPTIONS_VERSION: c_uint = 1; +pub const GIT_REVERT_OPTIONS_VERSION: c_uint = 1; macro_rules! git_enum { (pub enum $name:ident { $($variants:tt)* }) => { @@ -1746,6 +1747,8 @@ pub checkout_opts: git_checkout_options, } +pub type git_revert_options = git_cherrypick_options; + pub type git_apply_delta_cb = Option c_int>; @@ -1775,6 +1778,42 @@ } } +git_enum! 
{ + pub enum git_libgit2_opt_t { + GIT_OPT_GET_MWINDOW_SIZE = 0, + GIT_OPT_SET_MWINDOW_SIZE, + GIT_OPT_GET_MWINDOW_MAPPED_LIMIT, + GIT_OPT_SET_MWINDOW_MAPPED_LIMIT, + GIT_OPT_GET_SEARCH_PATH, + GIT_OPT_SET_SEARCH_PATH, + GIT_OPT_SET_CACHE_OBJECT_LIMIT, + GIT_OPT_SET_CACHE_MAX_SIZE, + GIT_OPT_ENABLE_CACHING, + GIT_OPT_GET_CACHED_MEMORY, + GIT_OPT_GET_TEMPLATE_PATH, + GIT_OPT_SET_TEMPLATE_PATH, + GIT_OPT_SET_SSL_CERT_LOCATIONS, + GIT_OPT_SET_USER_AGENT, + GIT_OPT_ENABLE_STRICT_OBJECT_CREATION, + GIT_OPT_ENABLE_STRICT_SYMBOLIC_REF_CREATION, + GIT_OPT_SET_SSL_CIPHERS, + GIT_OPT_GET_USER_AGENT, + GIT_OPT_ENABLE_OFS_DELTA, + GIT_OPT_ENABLE_FSYNC_GITDIR, + GIT_OPT_GET_WINDOWS_SHAREMODE, + GIT_OPT_SET_WINDOWS_SHAREMODE, + GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION, + GIT_OPT_SET_ALLOCATOR, + GIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY, + GIT_OPT_GET_PACK_MAX_OBJECTS, + GIT_OPT_SET_PACK_MAX_OBJECTS, + GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, + GIT_OPT_ENABLE_HTTP_EXPECT_CONTINUE, + GIT_OPT_GET_MWINDOW_FILE_LIMIT, + GIT_OPT_SET_MWINDOW_FILE_LIMIT, + } +} + extern "C" { // threads pub fn git_libgit2_init() -> c_int; @@ -2046,6 +2085,7 @@ remote: *mut git_remote, callbacks: *const git_remote_callbacks, ) -> c_int; + pub fn git_remote_default_branch(out: *mut git_buf, remote: *mut git_remote) -> c_int; // refspec pub fn git_refspec_direction(spec: *const git_refspec) -> git_direction; @@ -2055,6 +2095,16 @@ pub fn git_refspec_src_matches(spec: *const git_refspec, refname: *const c_char) -> c_int; pub fn git_refspec_force(spec: *const git_refspec) -> c_int; pub fn git_refspec_string(spec: *const git_refspec) -> *const c_char; + pub fn git_refspec_transform( + out: *mut git_buf, + spec: *const git_refspec, + name: *const c_char, + ) -> c_int; + pub fn git_refspec_rtransform( + out: *mut git_buf, + spec: *const git_refspec, + name: *const c_char, + ) -> c_int; // strarray pub fn git_strarray_free(array: *mut git_strarray); @@ -2274,6 +2324,11 @@ ) -> c_int; pub fn git_submodule_add_to_index(submodule: *mut git_submodule, write_index: c_int) -> c_int; pub fn git_submodule_branch(submodule: *mut git_submodule) -> *const c_char; + pub fn git_submodule_clone( + repo: *mut *mut git_repository, + submodule: *mut git_submodule, + opts: *const git_submodule_update_options, + ) -> c_int; pub fn git_submodule_foreach( repo: *mut git_repository, callback: git_submodule_cb, @@ -2577,6 +2632,11 @@ force: c_int, ) -> c_int; pub fn git_branch_name(out: *mut *const c_char, branch: *const git_reference) -> c_int; + pub fn git_branch_remote_name( + out: *mut git_buf, + repo: *mut git_repository, + refname: *const c_char, + ) -> c_int; pub fn git_branch_next( out: *mut *mut git_reference, out_type: *mut git_branch_t, @@ -2600,6 +2660,8 @@ ) -> c_int; // index + pub fn git_index_version(index: *mut git_index) -> c_uint; + pub fn git_index_set_version(index: *mut git_index, version: c_uint) -> c_int; pub fn git_index_add(index: *mut git_index, entry: *const git_index_entry) -> c_int; pub fn git_index_add_all( index: *mut git_index, @@ -2781,6 +2843,12 @@ ) -> c_int; pub fn git_config_snapshot(out: *mut *mut git_config, config: *mut git_config) -> c_int; pub fn git_config_entry_free(entry: *mut git_config_entry); + pub fn git_config_multivar_iterator_new( + out: *mut *mut git_config_iterator, + cfg: *const git_config, + name: *const c_char, + regexp: *const c_char, + ) -> c_int; // attr pub fn git_attr_get( @@ -3679,6 +3747,24 @@ location: git_apply_location_t, options: *const git_apply_options, ) -> c_int; + + // revert + pub fn 
git_revert_options_init(opts: *mut git_revert_options, version: c_uint) -> c_int; + pub fn git_revert_commit( + out: *mut *mut git_index, + repo: *mut git_repository, + revert_commit: *mut git_commit, + our_commit: *mut git_commit, + mainline: c_uint, + merge_options: *const git_merge_options, + ) -> c_int; + pub fn git_revert( + repo: *mut git_repository, + commit: *mut git_commit, + given_opts: *const git_revert_options, + ) -> c_int; + + pub fn git_libgit2_opts(option: c_int, ...) -> c_int; } pub fn init() { diff -Nru cargo-0.44.1/vendor/libgit2-sys/.pc/no-special-snowflake-env.patch/build.rs cargo-0.47.0/vendor/libgit2-sys/.pc/no-special-snowflake-env.patch/build.rs --- cargo-0.44.1/vendor/libgit2-sys/.pc/no-special-snowflake-env.patch/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libgit2-sys/.pc/no-special-snowflake-env.patch/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,18 +6,22 @@ fn main() { let https = env::var("CARGO_FEATURE_HTTPS").is_ok(); let ssh = env::var("CARGO_FEATURE_SSH").is_ok(); + let zlib_ng_compat = env::var("CARGO_FEATURE_ZLIB_NG_COMPAT").is_ok(); - let mut cfg = pkg_config::Config::new(); - if let Ok(lib) = cfg.atleast_version("1.0.0").probe("libgit2") { - for include in &lib.include_paths { - println!("cargo:root={}", include.display()); + // To use zlib-ng in zlib-compat mode, we have to build libgit2 ourselves. + if !zlib_ng_compat { + let mut cfg = pkg_config::Config::new(); + if let Ok(lib) = cfg.atleast_version("1.0.0").probe("libgit2") { + for include in &lib.include_paths { + println!("cargo:root={}", include.display()); + } + return; } - return; } if !Path::new("libgit2/.git").exists() { let _ = Command::new("git") - .args(&["submodule", "update", "--init"]) + .args(&["submodule", "update", "--init", "libgit2"]) .status(); } diff -Nru cargo-0.44.1/vendor/libnghttp2-sys/.cargo-checksum.json cargo-0.47.0/vendor/libnghttp2-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/libnghttp2-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libnghttp2-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"b359f5ec8106bc297694c9a562ace312be2cfd17a5fc68dc12249845aa144b11"} \ No newline at end of file +{"files":{},"package":"03624ec6df166e79e139a2310ca213283d6b3c30810c54844f307086d4488df1"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/libnghttp2-sys/Cargo.lock cargo-0.47.0/vendor/libnghttp2-sys/Cargo.lock --- cargo-0.44.1/vendor/libnghttp2-sys/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libnghttp2-sys/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [[package]] name = "libnghttp2-sys" -version = "0.1.3" +version = "0.1.4+1.41.0" dependencies = [ "cc 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.61 (registry+https://github.com/rust-lang/crates.io-index)", diff -Nru cargo-0.44.1/vendor/libnghttp2-sys/Cargo.toml cargo-0.47.0/vendor/libnghttp2-sys/Cargo.toml --- cargo-0.44.1/vendor/libnghttp2-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libnghttp2-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "libnghttp2-sys" -version = "0.1.3" +version = "0.1.4+1.41.0" authors = ["Alex Crichton "] links = "nghttp2" description = "FFI bindings for libnghttp2 (nghttp2)\n" diff -Nru cargo-0.44.1/vendor/libnghttp2-sys/.pc/use-system-lib.patch/Cargo.toml cargo-0.47.0/vendor/libnghttp2-sys/.pc/use-system-lib.patch/Cargo.toml --- 
cargo-0.44.1/vendor/libnghttp2-sys/.pc/use-system-lib.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libnghttp2-sys/.pc/use-system-lib.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "libnghttp2-sys" -version = "0.1.3" +version = "0.1.4+1.41.0" authors = ["Alex Crichton "] links = "nghttp2" description = "FFI bindings for libnghttp2 (nghttp2)\n" diff -Nru cargo-0.44.1/vendor/libssh2-sys/build.rs cargo-0.47.0/vendor/libssh2-sys/build.rs --- cargo-0.44.1/vendor/libssh2-sys/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -10,14 +10,20 @@ use std::process::Command; fn main() { - if try_vcpkg() { + let zlib_ng_compat = env::var("CARGO_FEATURE_ZLIB_NG_COMPAT").is_ok(); + + if !zlib_ng_compat && try_vcpkg() { return; } // The system copy of libssh2 is not used by default because it // can lead to having two copies of libssl loaded at once. // See https://github.com/alexcrichton/ssh2-rs/pull/88 + println!("cargo:rerun-if-env-changed=LIBSSH2_SYS_USE_PKG_CONFIG"); if true { + if zlib_ng_compat { + panic!("LIBSSH2_SYS_USE_PKG_CONFIG set, but cannot use zlib-ng-compat with system libssh2"); + } if let Ok(lib) = pkg_config::find_library("libssh2") { for path in &lib.include_paths { println!("cargo:include={}", path.display()); diff -Nru cargo-0.44.1/vendor/libssh2-sys/.cargo-checksum.json cargo-0.47.0/vendor/libssh2-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/libssh2-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"d45f516b9b19ea6c940b9f36d36734062a153a2b4cc9ef31d82c54bb9780f525"} \ No newline at end of file +{"files":{},"package":"ca46220853ba1c512fc82826d0834d87b06bcd3c2a42241b7de72f3d2fe17056"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/libssh2-sys/Cargo.toml cargo-0.47.0/vendor/libssh2-sys/Cargo.toml --- cargo-0.44.1/vendor/libssh2-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "libssh2-sys" -version = "0.2.17" +version = "0.2.19" authors = ["Alex Crichton ", "Wez Furlong "] build = "build.rs" links = "ssh2" @@ -28,7 +28,9 @@ version = "0.2" [dependencies.libz-sys] -version = "1.0.21" +version = "1.1.0" +features = ["libc"] +default-features = false [build-dependencies.cc] version = "1.0.25" @@ -37,6 +39,7 @@ [features] vendored-openssl = [] +zlib-ng-compat = ["libz-sys/zlib-ng"] [target."cfg(target_env = \"msvc\")".build-dependencies.vcpkg] version = "0.2" [target."cfg(unix)".dependencies.openssl-sys] diff -Nru cargo-0.44.1/vendor/libssh2-sys/debian/patches/disable-vendored.patch cargo-0.47.0/vendor/libssh2-sys/debian/patches/disable-vendored.patch --- cargo-0.44.1/vendor/libssh2-sys/debian/patches/disable-vendored.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/debian/patches/disable-vendored.patch 2020-10-01 21:38:28.000000000 +0000 @@ -6,6 +6,6 @@ [features] -vendored-openssl = ["openssl-sys/vendored"] +vendored-openssl = [] + zlib-ng-compat = ["libz-sys/zlib-ng"] [target."cfg(target_env = \"msvc\")".build-dependencies.vcpkg] version = "0.2" - [target."cfg(unix)".dependencies.openssl-sys] diff -Nru cargo-0.44.1/vendor/libssh2-sys/debian/patches/no-special-snowflake-env.patch 
cargo-0.47.0/vendor/libssh2-sys/debian/patches/no-special-snowflake-env.patch --- cargo-0.44.1/vendor/libssh2-sys/debian/patches/no-special-snowflake-env.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/debian/patches/no-special-snowflake-env.patch 2020-10-01 21:38:28.000000000 +0000 @@ -1,16 +1,15 @@ --- a/build.rs +++ b/build.rs -@@ -17,8 +17,7 @@ - // The system copy of libssh2 is not used by default because it +@@ -17,7 +17,7 @@ // can lead to having two copies of libssl loaded at once. // See https://github.com/alexcrichton/ssh2-rs/pull/88 -- println!("cargo:rerun-if-env-changed=LIBSSH2_SYS_USE_PKG_CONFIG"); + println!("cargo:rerun-if-env-changed=LIBSSH2_SYS_USE_PKG_CONFIG"); - if env::var("LIBSSH2_SYS_USE_PKG_CONFIG").is_ok() { + if true { - if let Ok(lib) = pkg_config::find_library("libssh2") { - for path in &lib.include_paths { - println!("cargo:include={}", path.display()); -@@ -27,7 +26,7 @@ + if zlib_ng_compat { + panic!("LIBSSH2_SYS_USE_PKG_CONFIG set, but cannot use zlib-ng-compat with system libssh2"); + } +@@ -26,7 +26,7 @@ } } diff -Nru cargo-0.44.1/vendor/libssh2-sys/.pc/disable-vendored.patch/Cargo.toml cargo-0.47.0/vendor/libssh2-sys/.pc/disable-vendored.patch/Cargo.toml --- cargo-0.44.1/vendor/libssh2-sys/.pc/disable-vendored.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/.pc/disable-vendored.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "libssh2-sys" -version = "0.2.17" +version = "0.2.19" authors = ["Alex Crichton ", "Wez Furlong "] build = "build.rs" links = "ssh2" @@ -28,7 +28,9 @@ version = "0.2" [dependencies.libz-sys] -version = "1.0.21" +version = "1.1.0" +features = ["libc"] +default-features = false [build-dependencies.cc] version = "1.0.25" @@ -37,6 +39,7 @@ [features] vendored-openssl = ["openssl-sys/vendored"] +zlib-ng-compat = ["libz-sys/zlib-ng"] [target."cfg(target_env = \"msvc\")".build-dependencies.vcpkg] version = "0.2" [target."cfg(unix)".dependencies.openssl-sys] diff -Nru cargo-0.44.1/vendor/libssh2-sys/.pc/no-special-snowflake-env.patch/build.rs cargo-0.47.0/vendor/libssh2-sys/.pc/no-special-snowflake-env.patch/build.rs --- cargo-0.44.1/vendor/libssh2-sys/.pc/no-special-snowflake-env.patch/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libssh2-sys/.pc/no-special-snowflake-env.patch/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -10,7 +10,9 @@ use std::process::Command; fn main() { - if try_vcpkg() { + let zlib_ng_compat = env::var("CARGO_FEATURE_ZLIB_NG_COMPAT").is_ok(); + + if !zlib_ng_compat && try_vcpkg() { return; } @@ -19,6 +21,9 @@ // See https://github.com/alexcrichton/ssh2-rs/pull/88 println!("cargo:rerun-if-env-changed=LIBSSH2_SYS_USE_PKG_CONFIG"); if env::var("LIBSSH2_SYS_USE_PKG_CONFIG").is_ok() { + if zlib_ng_compat { + panic!("LIBSSH2_SYS_USE_PKG_CONFIG set, but cannot use zlib-ng-compat with system libssh2"); + } if let Ok(lib) = pkg_config::find_library("libssh2") { for path in &lib.include_paths { println!("cargo:include={}", path.display()); diff -Nru cargo-0.44.1/vendor/libz-sys/appveyor.yml cargo-0.47.0/vendor/libz-sys/appveyor.yml --- cargo-0.44.1/vendor/libz-sys/appveyor.yml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-gnu - MSYS_BITS: 64 - - TARGET: i686-pc-windows-gnu - MSYS_BITS: 32 - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc 
- - TARGET: x86_64-pc-windows-msvc - VCPKG_DEFAULT_TRIPLET: x64-windows-static - RUSTFLAGS: -Ctarget-feature=+crt-static -install: - - git submodule update --init --recursive - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - if defined MSYS_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin - - rustc -V - - cargo -V - - if defined VCPKG_DEFAULT_TRIPLET git clone https://github.com/Microsoft/vcpkg c:\projects\vcpkg - - if defined VCPKG_DEFAULT_TRIPLET c:\projects\vcpkg\bootstrap-vcpkg.bat - - if defined VCPKG_DEFAULT_TRIPLET set VCPKG_ROOT=c:\projects\vcpkg - - if defined VCPKG_DEFAULT_TRIPLET echo yes > %VCPKG_ROOT%\Downloads\AlwaysAllowDownloads - - if defined VCPKG_DEFAULT_TRIPLET %VCPKG_ROOT%\vcpkg.exe install zlib - -build: false - -test_script: - - cargo test --target %TARGET% - - cargo run --manifest-path systest/Cargo.toml --target %TARGET% diff -Nru cargo-0.44.1/vendor/libz-sys/build.rs cargo-0.47.0/vendor/libz-sys/build.rs --- cargo-0.44.1/vendor/libz-sys/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ +extern crate cc; extern crate pkg_config; #[cfg(target_env = "msvc")] extern crate vcpkg; -extern crate cc; use std::env; use std::fs; @@ -13,52 +13,64 @@ println!("cargo:rerun-if-changed=build.rs"); let host = env::var("HOST").unwrap(); let target = env::var("TARGET").unwrap(); - let wants_asm = cfg!(feature = "asm"); let host_and_target_contain = |s| host.contains(s) && target.contains(s); + let want_ng = cfg!(feature = "zlib-ng") && !cfg!(feature = "stock-zlib"); + + if want_ng && target != "wasm32-unknown-unknown" { + return build_zlib_ng(&target); + } + // Don't run pkg-config if we're linking statically (we'll build below) and // also don't run pkg-config on macOS/FreeBSD/DragonFly. That'll end up printing // `-L /usr/lib` which wreaks havoc with linking to an OpenSSL in /usr/local/lib // (Homebrew, Ports, etc.) let want_static = cfg!(feature = "static") || env::var("LIBZ_SYS_STATIC").unwrap_or(String::new()) == "1"; - if !wants_asm && - !want_static && + if !want_static && !target.contains("msvc") && // pkg-config just never works here !(host_and_target_contain("apple") || host_and_target_contain("freebsd") || - host_and_target_contain("dragonfly")) && - pkg_config::Config::new().cargo_metadata(true).probe("zlib").is_ok() { - return + host_and_target_contain("dragonfly")) + { + // Don't print system lib dirs to cargo since this interferes with other + // packages adding non-system search paths to link against libraries + // that are also found in a system-wide lib dir. + let zlib = pkg_config::Config::new() + .cargo_metadata(true) + .print_system_libs(false) + .probe("zlib"); + if zlib.is_ok() { + return; + } } if target.contains("msvc") { - if !wants_asm && try_vcpkg() { + if try_vcpkg() { return; } } // All android compilers should come with libz by default, so let's just use // the one already there. - if !wants_asm && target.contains("android") { + if target.contains("android") { println!("cargo:rustc-link-lib=z"); - return + return; } let mut cfg = cc::Build::new(); - // Whitelist a bunch of situations where we build unconditionally. + // Situations where we build unconditionally. 
// // MSVC basically never has it preinstalled, MinGW picks up a bunch of weird // paths we don't like, `want_static` may force us, cross compiling almost // never has a prebuilt version, and musl is almost always static. - if wants_asm || - target.contains("msvc") || - target.contains("pc-windows-gnu") || - want_static || - target != host || - target.contains("musl") + if target.contains("msvc") + || target.contains("pc-windows-gnu") + || want_static + || target != host + || target.contains("musl") { return build_zlib(&mut cfg, &target); } @@ -71,7 +83,7 @@ // otherwise continue below to build things. if zlib_installed(&mut cfg) { println!("cargo:rustc-link-lib=z"); - return + return; } build_zlib(&mut cfg, &target) @@ -80,20 +92,13 @@ fn build_zlib(cfg: &mut cc::Build, target: &str) { let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let build = dst.join("build"); - let asm = cfg!(feature = "asm"); - cfg.warnings(false) - .out_dir(&build) - .include("src/zlib"); + cfg.warnings(false).out_dir(&build).include("src/zlib"); cfg.file("src/zlib/adler32.c") .file("src/zlib/compress.c") .file("src/zlib/crc32.c") .file("src/zlib/deflate.c") - .file("src/zlib/gzclose.c") - .file("src/zlib/gzlib.c") - .file("src/zlib/gzread.c") - .file("src/zlib/gzwrite.c") .file("src/zlib/infback.c") .file("src/zlib/inffast.c") .file("src/zlib/inflate.c") @@ -101,43 +106,29 @@ .file("src/zlib/trees.c") .file("src/zlib/uncompr.c") .file("src/zlib/zutil.c"); + + if !cfg!(feature = "libc") || target == "wasm32-unknown-unknown" { + cfg.define("Z_SOLO", None); + } else { + cfg.file("src/zlib/gzclose.c") + .file("src/zlib/gzlib.c") + .file("src/zlib/gzread.c") + .file("src/zlib/gzwrite.c"); + } + if !target.contains("windows") { cfg.define("STDC", None); cfg.define("_LARGEFILE64_SOURCE", None); cfg.define("_POSIX_SOURCE", None); cfg.flag("-fvisibility=hidden"); } - if target.contains("ios") { + if target.contains("apple") { cfg.define("_C99_SOURCE", None); } if target.contains("solaris") { cfg.define("_XOPEN_SOURCE", "700"); } - if asm { - if target.contains("windows-msvc") { - if target.starts_with("x86_64") { - cfg.file("src/zlib/contrib/masmx64/inffasx64.asm") - .file("src/zlib/contrib/masmx64/gvmat64.asm") - .define("ASMV", None) - .define("ASMINF", None); - } else if target.starts_with("i686") { - cfg.file("src/zlib/contrib/masmx86/inffas32.asm") - .file("src/zlib/contrib/masmx86/match686.asm") - .define("ASMV", None) - .define("ASMINF", None); - } - } else { - if target.starts_with("x86_64") { - cfg.file("src/zlib/contrib/amd64/amd64-match.S") - .define("ASMV", None); - } else if target.starts_with("i686") { - cfg.file("src/zlib/contrib/inflate86/inffast.S") - .define("ASMINF", None); - } - } - } - cfg.compile("z"); fs::create_dir_all(dst.join("lib/pkgconfig")).unwrap(); @@ -150,12 +141,43 @@ fs::read_to_string("src/zlib/zlib.pc.in") .unwrap() .replace("@prefix@", dst.to_str().unwrap()), - ).unwrap(); + ) + .unwrap(); println!("cargo:root={}", dst.to_str().unwrap()); println!("cargo:include={}/include", dst.to_str().unwrap()); } +#[cfg(not(feature = "zlib-ng"))] +fn build_zlib_ng(_target: &str) {} + +#[cfg(feature = "zlib-ng")] +fn build_zlib_ng(target: &str) { + let install_dir = cmake::Config::new("src/zlib-ng") + .define("BUILD_SHARED_LIBS", "OFF") + .define("ZLIB_COMPAT", "ON") + .define("WITH_GZFILEOP", "ON") + .build(); + let includedir = install_dir.join("include"); + let libdir = install_dir.join("lib"); + println!( + "cargo:rustc-link-search=native={}", + libdir.to_str().unwrap() + ); + let 
libname = if target.contains("windows") { + if target.contains("msvc") && env::var("OPT_LEVEL").unwrap() == "0" { + "zlibd" + } else { + "zlib" + } + } else { + "z" + }; + println!("cargo:rustc-link-lib=static={}", libname); + println!("cargo:root={}", install_dir.to_str().unwrap()); + println!("cargo:include={}", includedir.to_str().unwrap()); +} + #[cfg(not(target_env = "msvc"))] fn try_vcpkg() -> bool { false @@ -165,28 +187,27 @@ fn try_vcpkg() -> bool { // see if there is a vcpkg tree with zlib installed match vcpkg::Config::new() - .emit_includes(true) - .lib_names("zlib", "zlib1") - .probe("zlib") { - Ok(_) => { true }, + .emit_includes(true) + .lib_names("zlib", "zlib1") + .probe("zlib") + { + Ok(_) => true, Err(e) => { println!("note, vcpkg did not find zlib: {}", e); false - }, + } } } fn zlib_installed(cfg: &mut cc::Build) -> bool { let compiler = cfg.get_compiler(); let mut cmd = Command::new(compiler.path()); - cmd.arg("src/smoke.c") - .arg("-o").arg("/dev/null") - .arg("-lz"); + cmd.arg("src/smoke.c").arg("-o").arg("/dev/null").arg("-lz"); println!("running {:?}", cmd); if let Ok(status) = cmd.status() { if status.success() { - return true + return true; } } diff -Nru cargo-0.44.1/vendor/libz-sys/.cargo-checksum.json cargo-0.47.0/vendor/libz-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/libz-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe"} \ No newline at end of file +{"files":{},"package":"602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/libz-sys/Cargo.toml cargo-0.47.0/vendor/libz-sys/Cargo.toml --- cargo-0.44.1/vendor/libz-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're @@ -12,25 +12,34 @@ [package] name = "libz-sys" -version = "1.0.25" -authors = ["Alex Crichton "] +version = "1.1.2" +authors = ["Alex Crichton ", "Josh Triplett "] build = "build.rs" links = "z" -description = "Bindings to the system libz library (also known as zlib).\n" +description = "Low-level bindings to the system libz library (also known as zlib).\n" documentation = "https://docs.rs/libz-sys" -categories = ["external-ffi-bindings"] -license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/libz-sys" +keywords = ["zlib", "zlib-ng"] +categories = ["compression", "external-ffi-bindings"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/libz-sys" [dependencies.libc] version = "0.2.43" +optional = true [build-dependencies.cc] version = "1.0.18" +[build-dependencies.cmake] +version = "0.1.44" +optional = true + [build-dependencies.pkg-config] version = "0.3.9" [features] asm = [] +default = ["libc", "stock-zlib"] static = [] +stock-zlib = [] +zlib-ng = ["libc", "cmake"] [target."cfg(target_env = \"msvc\")".build-dependencies.vcpkg] version = "0.2" diff -Nru cargo-0.44.1/vendor/libz-sys/ci/run-docker.sh cargo-0.47.0/vendor/libz-sys/ci/run-docker.sh --- cargo-0.44.1/vendor/libz-sys/ci/run-docker.sh 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/ci/run-docker.sh 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,4 @@ +#!/bin/sh set -ex mkdir -p target diff -Nru cargo-0.44.1/vendor/libz-sys/ci/set_rust_version.bash cargo-0.47.0/vendor/libz-sys/ci/set_rust_version.bash --- cargo-0.44.1/vendor/libz-sys/ci/set_rust_version.bash 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/ci/set_rust_version.bash 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -e +rustup default $1 +rustup target add $2 diff -Nru cargo-0.44.1/vendor/libz-sys/ci/test.bash cargo-0.47.0/vendor/libz-sys/ci/test.bash --- cargo-0.44.1/vendor/libz-sys/ci/test.bash 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/ci/test.bash 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Script for building your rust projects. 
+set -e + +required_arg() { + if [ -z "$1" ]; then + echo "Required argument $2 missing" + exit 1 + fi +} + +# $1 {path} = Path to cross/cargo executable +CROSS=$1 +# $2 {string} = +TARGET_TRIPLE=$2 + +required_arg $CROSS 'CROSS' +required_arg $TARGET_TRIPLE '' + +if [ "${TARGET_TRIPLE%-windows-gnu}" != "$TARGET_TRIPLE" ]; then + # On windows-gnu targets, we need to set the PATH to include MinGW + if [ "${TARGET_TRIPLE#x86_64-}" != "$TARGET_TRIPLE" ]; then + PATH=/c/msys64/mingw64/bin:/c/msys64/usr/bin:$PATH + elif [ "${TARGET_TRIPLE#i?86-}" != "$TARGET_TRIPLE" ]; then + PATH=/c/msys64/mingw32/bin:/c/msys64/usr/bin:$PATH + else + echo Unknown windows-gnu target + exit 1 + fi +fi + +$CROSS test --target $TARGET_TRIPLE +$CROSS run --target $TARGET_TRIPLE --manifest-path systest/Cargo.toml +echo === zlib-ng build === +$CROSS test --target $TARGET_TRIPLE --no-default-features --features zlib-ng +$CROSS run --target $TARGET_TRIPLE --manifest-path systest/Cargo.toml --no-default-features --features zlib-ng diff -Nru cargo-0.44.1/vendor/libz-sys/LICENSE-MIT cargo-0.47.0/vendor/libz-sys/LICENSE-MIT --- cargo-0.44.1/vendor/libz-sys/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/LICENSE-MIT 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,5 @@ Copyright (c) 2014 Alex Crichton +Copyright (c) 2020 Josh Triplett Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff -Nru cargo-0.44.1/vendor/libz-sys/README.md cargo-0.47.0/vendor/libz-sys/README.md --- cargo-0.44.1/vendor/libz-sys/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -2,11 +2,43 @@ A common library for linking `libz` to rust programs (also known as zlib). -[![Build Status](https://travis-ci.org/alexcrichton/libz-sys.svg?branch=master)](https://travis-ci.org/alexcrichton/libz-sys) -[![Build status](https://ci.appveyor.com/api/projects/status/et3ae5mgpbokh9g9?svg=true)](https://ci.appveyor.com/project/alexcrichton/libz-sys) - [Documentation](https://docs.rs/libz-sys) +# High-level API + +This crate provides bindings to the raw low-level C API. For a higher-level +safe API to work with DEFLATE, zlib, or gzip streams, see +[`flate2`](https://docs.rs/flate2). `flate2` also supports alternative +implementations, including slower but pure Rust implementations. + +# zlib-ng + +This crate supports building either the high-performance zlib-ng (in +zlib-compat mode), or the widely available stock zlib. + +By default, `libz-sys` uses stock zlib, primarily because doing so allows the +use of a shared system zlib library if available. + +Any application or library designed for zlib should work with zlib-ng in +zlib-compat mode, as long as it doesn't make assumptions about the exact size +or output of the deflated data (e.g. "compressing this data produces exactly +this many bytes"), and as long as you don't also dynamically pull in a copy of +stock zlib (which will produce conflicting symbols). Nonetheless, for maximum +compatibility, every library crate in a build must opt into allowing zlib-ng; +if any library crate in your dependency graph wants stock zlib, `libz-sys` will +use stock zlib. + +Library crates depending on `libz-sys` should use: +``` +libz-sys = { version = "1.1.0", default-features = false, features = ["libc"] } +``` +(Omit the `libc` feature if you don't require the corresponding functions.) 
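As an aside to the README hunk above (not part of the diff itself): a minimal sketch of a binary crate that depends on `libz-sys` and prints the version string reported by whichever zlib got linked, using the `zlibVersion` binding that stays in the unconditional extern block of `src/lib.rs`. This is one way to confirm at runtime whether zlib-ng in zlib-compat mode or stock zlib ended up in the final binary; the crate layout and output wording here are assumptions for illustration only.

```rust
// Hedged sketch: assumes a binary crate with `libz-sys` in its dependencies.
use std::ffi::CStr;

fn main() {
    // `zlibVersion()` is declared in libz-sys' unconditional extern block,
    // so it is available with or without the `libc` feature enabled.
    let version = unsafe { CStr::from_ptr(libz_sys::zlibVersion()) };
    println!("linked zlib reports: {}", version.to_string_lossy());
}
```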
+ +This allows higher-level crates depending on your library to opt into zlib-ng +if desired. + +Building zlib-ng requires `cmake`. + # License This project is licensed under either of diff -Nru cargo-0.44.1/vendor/libz-sys/src/lib.rs cargo-0.47.0/vendor/libz-sys/src/lib.rs --- cargo-0.44.1/vendor/libz-sys/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/libz-sys/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,16 +1,15 @@ #![doc(html_root_url = "https://docs.rs/libz-sys/1.0")] #![allow(non_camel_case_types)] -extern crate libc; +use std::os::raw::{c_char, c_int, c_long, c_uchar, c_uint, c_ulong, c_void}; -use libc::{c_char, c_int, c_long, c_uchar, c_uint, c_ulong, c_void}; - -pub type alloc_func = unsafe extern fn (voidpf, uInt, uInt) -> voidpf; +pub type alloc_func = unsafe extern "C" fn(voidpf, uInt, uInt) -> voidpf; pub type Bytef = u8; -pub type free_func = unsafe extern fn (voidpf, voidpf); +pub type free_func = unsafe extern "C" fn(voidpf, voidpf); +#[cfg(feature = "libc")] pub type gzFile = *mut gzFile_s; -pub type in_func = unsafe extern fn (*mut c_void, *mut *const c_uchar) -> c_uint; -pub type out_func = unsafe extern fn (*mut c_void, *mut c_uchar, c_uint) -> c_int; +pub type in_func = unsafe extern "C" fn(*mut c_void, *mut *const c_uchar) -> c_uint; +pub type out_func = unsafe extern "C" fn(*mut c_void, *mut c_uchar, c_uint) -> c_int; pub type uInt = c_uint; pub type uLong = c_ulong; pub type uLongf = c_ulong; @@ -18,11 +17,12 @@ pub type voidpc = *const c_void; pub type voidpf = *mut c_void; +#[cfg(feature = "libc")] pub enum gzFile_s {} pub enum internal_state {} -#[cfg(unix)] pub type z_off_t = libc::off_t; -#[cfg(not(unix))] pub type z_off_t = c_long; +#[cfg(feature = "libc")] +pub type z_off_t = libc::off_t; #[repr(C)] #[derive(Copy, Clone)] @@ -63,29 +63,9 @@ } pub type z_streamp = *mut z_stream; -macro_rules! fns { - ($($arg:tt)*) => { - item! { - extern { $($arg)* } - } - } -} - -macro_rules! item { - ($i:item) => ($i) -} - -fns! 
{ +extern "C" { pub fn adler32(adler: uLong, buf: *const Bytef, len: uInt) -> uLong; - pub fn adler32_combine(adler1: uLong, adler2: uLong, len2: z_off_t) -> uLong; - pub fn compress(dest: *mut Bytef, destLen: *mut uLongf, - source: *const Bytef, sourceLen: uLong) -> c_int; - pub fn compress2(dest: *mut Bytef, destLen: *mut uLongf, - source: *const Bytef, sourceLen: uLong, - level: c_int) -> c_int; - pub fn compressBound(sourceLen: uLong) -> uLong; pub fn crc32(crc: uLong, buf: *const Bytef, len: uInt) -> uLong; - pub fn crc32_combine(crc1: uLong, crc2: uLong, len2: z_off_t) -> uLong; pub fn deflate(strm: z_streamp, flush: c_int) -> c_int; pub fn deflateBound(strm: z_streamp, sourceLen: uLong) -> uLong; pub fn deflateCopy(dest: z_streamp, source: z_streamp) -> c_int; @@ -115,25 +95,6 @@ max_lazy: c_int, nice_length: c_int, max_chain: c_int) -> c_int; - pub fn gzdirect(file: gzFile) -> c_int; - pub fn gzdopen(fd: c_int, mode: *const c_char) -> gzFile; - pub fn gzclearerr(file: gzFile); - pub fn gzclose(file: gzFile) -> c_int; - pub fn gzeof(file: gzFile) -> c_int; - pub fn gzerror(file: gzFile, errnum: *mut c_int) -> *const c_char; - pub fn gzflush(file: gzFile, flush: c_int) -> c_int; - pub fn gzgetc(file: gzFile) -> c_int; - pub fn gzgets(file: gzFile, buf: *mut c_char, len: c_int) -> *mut c_char; - pub fn gzopen(path: *const c_char, mode: *const c_char) -> gzFile; - pub fn gzputc(file: gzFile, c: c_int) -> c_int; - pub fn gzputs(file: gzFile, s: *const c_char) -> c_int; - pub fn gzread(file: gzFile, buf: voidp, len: c_uint) -> c_int; - pub fn gzrewind(file: gzFile) -> c_int; - pub fn gzseek(file: gzFile, offset: z_off_t, whence: c_int) -> z_off_t; - pub fn gzsetparams(file: gzFile, level: c_int, strategy: c_int) -> c_int; - pub fn gztell(file: gzFile) -> z_off_t; - pub fn gzungetc(c: c_int, file: gzFile) -> c_int; - pub fn gzwrite(file: gzFile, buf: voidpc, len: c_uint) -> c_int; pub fn inflate(strm: z_streamp, flush: c_int) -> c_int; pub fn inflateBack(strm: z_streamp, _in: in_func, @@ -164,10 +125,6 @@ dictionary: *const Bytef, dictLength: uInt) -> c_int; pub fn inflateSync(strm: z_streamp) -> c_int; - pub fn uncompress(dest: *mut Bytef, - destLen: *mut uLongf, - source: *const Bytef, - sourceLen: uLong) -> c_int; pub fn zlibCompileFlags() -> uLong; pub fn zlibVersion() -> *const c_char; @@ -194,6 +151,41 @@ // pub fn gzoffset(file: gzFile) -> z_off_t; } +#[cfg(feature = "libc")] +extern "C" { + pub fn adler32_combine(adler1: uLong, adler2: uLong, len2: z_off_t) -> uLong; + pub fn compress(dest: *mut Bytef, destLen: *mut uLongf, + source: *const Bytef, sourceLen: uLong) -> c_int; + pub fn compress2(dest: *mut Bytef, destLen: *mut uLongf, + source: *const Bytef, sourceLen: uLong, + level: c_int) -> c_int; + pub fn compressBound(sourceLen: uLong) -> uLong; + pub fn crc32_combine(crc1: uLong, crc2: uLong, len2: z_off_t) -> uLong; + pub fn gzdirect(file: gzFile) -> c_int; + pub fn gzdopen(fd: c_int, mode: *const c_char) -> gzFile; + pub fn gzclearerr(file: gzFile); + pub fn gzclose(file: gzFile) -> c_int; + pub fn gzeof(file: gzFile) -> c_int; + pub fn gzerror(file: gzFile, errnum: *mut c_int) -> *const c_char; + pub fn gzflush(file: gzFile, flush: c_int) -> c_int; + pub fn gzgetc(file: gzFile) -> c_int; + pub fn gzgets(file: gzFile, buf: *mut c_char, len: c_int) -> *mut c_char; + pub fn gzopen(path: *const c_char, mode: *const c_char) -> gzFile; + pub fn gzputc(file: gzFile, c: c_int) -> c_int; + pub fn gzputs(file: gzFile, s: *const c_char) -> c_int; + pub fn gzread(file: gzFile, buf: 
voidp, len: c_uint) -> c_int; + pub fn gzrewind(file: gzFile) -> c_int; + pub fn gzseek(file: gzFile, offset: z_off_t, whence: c_int) -> z_off_t; + pub fn gzsetparams(file: gzFile, level: c_int, strategy: c_int) -> c_int; + pub fn gztell(file: gzFile) -> z_off_t; + pub fn gzungetc(c: c_int, file: gzFile) -> c_int; + pub fn gzwrite(file: gzFile, buf: voidpc, len: c_uint) -> c_int; + pub fn uncompress(dest: *mut Bytef, + destLen: *mut uLongf, + source: *const Bytef, + sourceLen: uLong) -> c_int; +} + pub const Z_NO_FLUSH: c_int = 0; pub const Z_PARTIAL_FLUSH: c_int = 1; pub const Z_SYNC_FLUSH: c_int = 2; diff -Nru cargo-0.44.1/vendor/log/build.rs cargo-0.47.0/vendor/log/build.rs --- cargo-0.44.1/vendor/log/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,14 +1,14 @@ -//! This build script detects target platforms that lack proper support for -//! atomics and sets `cfg` flags accordingly. - -use std::env; - -fn main() { - let target = env::var("TARGET").unwrap(); - - if !target.starts_with("thumbv6") { - println!("cargo:rustc-cfg=atomic_cas"); - } - - println!("cargo:rerun-if-changed=build.rs"); -} +//! This build script detects target platforms that lack proper support for +//! atomics and sets `cfg` flags accordingly. + +use std::env; + +fn main() { + let target = env::var("TARGET").unwrap(); + + if !target.starts_with("thumbv6") { + println!("cargo:rustc-cfg=atomic_cas"); + } + + println!("cargo:rerun-if-changed=build.rs"); +} diff -Nru cargo-0.44.1/vendor/log/.cargo-checksum.json cargo-0.47.0/vendor/log/.cargo-checksum.json --- cargo-0.44.1/vendor/log/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"} \ No newline at end of file +{"files":{},"package":"4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/log/Cargo.toml cargo-0.47.0/vendor/log/Cargo.toml --- cargo-0.44.1/vendor/log/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're @@ -12,7 +12,7 @@ [package] name = "log" -version = "0.4.8" +version = "0.4.11" authors = ["The Rust Project Developers"] build = "build.rs" exclude = ["rfcs/**/*", "/.travis.yml", "/appveyor.yml"] @@ -29,6 +29,10 @@ [[test]] name = "filters" harness = false + +[[test]] +name = "macros" +harness = true [dependencies.cfg-if] version = "0.1.2" @@ -37,11 +41,20 @@ optional = true default-features = false -[dev-dependencies.serde_test] -version = "1.0" +# [dependencies.sval] +# version = "0.5.2" +# optional = true +# default-features = false +# [dev-dependencies.serde_test] +# version = "1.0" + +# [dev-dependencies.sval] +# version = "0.5.2" +# features = ["test"] [features] kv_unstable = [] +kv_unstable_sval = ["kv_unstable"]#, "sval/fmt"] max_level_debug = [] max_level_error = [] max_level_info = [] @@ -55,8 +68,3 @@ release_max_level_trace = [] release_max_level_warn = [] std = [] -[badges.appveyor] -repository = "alexcrichton/log" - -[badges.travis-ci] -repository = "rust-lang-nursery/log" diff -Nru cargo-0.44.1/vendor/log/CHANGELOG.md cargo-0.47.0/vendor/log/CHANGELOG.md --- cargo-0.44.1/vendor/log/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,145 +1,186 @@ -# Change Log - -## [Unreleased] - -## [0.4.8] - 2019-07-28 - -### New - -* Support attempting to get `Record` fields as static strings. - -## [0.4.7] - 2019-07-06 - -### New - -* Support for embedded environments with thread-unsafe initialization. -* Initial unstable support for capturing structured data under the `kv_unstable` -feature gate. This new API doesn't affect existing users and may change in future -patches (so those changes may not appear in the changelog until it stabilizes). - -### Improved - -* Docs for using `log` with the 2018 edition. -* Error messages for macros missing arguments. - -## [0.4.6] - 2018-10-27 - -### Improved - -* Support 2018-style macro import for the `log_enabled!` macro. - -## [0.4.5] - 2018-09-03 - -### Improved - -* Make `log`'s internal helper macros less likely to conflict with user-defined - macros. - -## [0.4.4] - 2018-08-17 - -### Improved - -* Support 2018-style imports of the log macros. - -## [0.4.3] - 2018-06-29 - -### Improved - -* More code generation improvements. - -## [0.4.2] - 2018-06-05 - -### Improved - -* Log invocations now generate less code. - -### Fixed - -* Example Logger implementations now properly set the max log level. - -## [0.4.1] - 2017-12-30 - -### Fixed - -* Some doc links were fixed. - -## [0.4.0] - 2017-12-24 - -The changes in this release include cleanup of some obscure functionality and a more robust public -API designed to support bridges to other logging systems, and provide more flexibility to new -features in the future. - -### Compatibility - -Vast portions of the Rust ecosystem use the 0.3.x release series of log, and we don't want to force -the community to go through the pain of upgrading every crate to 0.4.x at the exact same time. Along -with 0.4.0, we've published a new 0.3.9 release which acts as a "shim" over 0.4.0. This will allow -crates using either version to coexist without losing messages from one side or the other. - -There is one caveat - a log message generated by a crate using 0.4.x but consumed by a logging -implementation using 0.3.x will not have a file name or module path. Applications affected by this -can upgrade their logging implementations to one using 0.4.x to avoid losing this information. 
The -other direction does not lose any information, fortunately! - -**TL;DR** Libraries should feel comfortable upgrading to 0.4.0 without treating that as a breaking -change. Applications may need to update their logging implementation (e.g. env-logger) to a newer -version using log 0.4.x to avoid losing module and file information. - -### New - -* The crate is now `no_std` by default. -* `Level` and `LevelFilter` now implement `Serialize` and `Deserialize` when the `serde` feature is - enabled. -* The `Record` and `Metadata` types can now be constructed by third-party code via a builder API. -* The `logger` free function returns a reference to the logger implementation. This, along with the - ability to construct `Record`s, makes it possible to bridge from another logging framework to - this one without digging into the private internals of the crate. The standard `error!` `warn!`, - etc, macros now exclusively use the public API of the crate rather than "secret" internal APIs. -* `Log::flush` has been added to allow crates to tell the logging implementation to ensure that all - "in flight" log events have been persisted. This can be used, for example, just before an - application exits to ensure that asynchronous log sinks finish their work. - -### Removed - -* The `shutdown` and `shutdown_raw` functions have been removed. Supporting shutdown significantly - complicated the implementation and imposed a performance cost on each logging operation. -* The `log_panics` function and its associated `nightly` Cargo feature have been removed. Use the - [log-panics](https://crates.io/crates/log-panics) instead. - -### Changed - -* The `Log` prefix has been removed from type names. For example, `LogLevelFilter` is now - `LevelFilter`, and `LogRecord` is now `Record`. -* The `MaxLogLevelFilter` object has been removed in favor of a `set_max_level` free function. -* The `set_logger` free functions have been restructured. The logger is now directly passed to the - functions rather than a closure which returns the logger. `set_logger` now takes a `&'static - Log` and is usable in `no_std` contexts in place of the old `set_logger_raw`. `set_boxed_logger` - is a convenience function which takes a `Box` but otherwise acts like `set_logger`. It - requires the `std` feature. -* The `file` and `module_path` values in `Record` no longer have the `'static` lifetime to support - integration with other logging frameworks that don't provide a `'static` lifetime for the - equivalent values. -* The `file`, `line`, and `module_path` values in `Record` are now `Option`s to support integration - with other logging frameworks that don't provide those values. - -### In the Future - -* We're looking to add support for *structured* logging - the inclusion of extra key-value pairs of - information in a log event in addition to the normal string message. This should be able to be - added in a backwards compatible manner to the 0.4.x series when the design is worked out. - -## Older - -Look at the [release tags] for information about older releases. 
- -[Unreleased]: https://github.com/rust-lang-nursery/log/compare/0.4.8...HEAD -[0.4.8]: https://github.com/rust-lang-nursery/log/compare/0.4.7...0.4.8 -[0.4.7]: https://github.com/rust-lang-nursery/log/compare/0.4.6...0.4.7 -[0.4.6]: https://github.com/rust-lang-nursery/log/compare/0.4.5...0.4.6 -[0.4.5]: https://github.com/rust-lang-nursery/log/compare/0.4.4...0.4.5 -[0.4.4]: https://github.com/rust-lang-nursery/log/compare/0.4.3...0.4.4 -[0.4.3]: https://github.com/rust-lang-nursery/log/compare/0.4.2...0.4.3 -[0.4.2]: https://github.com/rust-lang-nursery/log/compare/0.4.1...0.4.2 -[0.4.1]: https://github.com/rust-lang-nursery/log/compare/0.4.0...0.4.1 -[0.4.0]: https://github.com/rust-lang-nursery/log/compare/0.3.8...0.4.0 -[release tags]: https://github.com/rust-lang-nursery/log/releases +# Change Log + +## [Unreleased] + +## [0.4.11] - 2020-07-09 + +### New + +* Support coercing structured values into concrete types. +* Reference the `win_dbg_logger` in the readme. + +### Fixed + +* Updates a few deprecated items used internally. +* Fixed issues in docs and expands sections. +* Show the correct build badge in the readme. +* Fix up a possible inference breakage with structured value errors. +* Respect formatting flags in structured value formatting. + +## [0.4.10] - 2019-12-16 (yanked) + +### Fixed + +* Fixed the `log!` macros so they work in expression context (this regressed in `0.4.9`, which has been yanked). + +## [0.4.9] - 2019-12-12 (yanked) + +### Minimum Supported Rust Version + +This release bumps the minimum compiler version to `1.31.0`. This was mainly needed for `cfg-if`, +but between `1.16.0` and `1.31.0` there are a lot of language and library improvements we now +take advantage of. + +### New + +* Unstable support for capturing key-value pairs in a record using the `log!` macros + +### Improved + +* Better documentation for max level filters. +* Internal updates to line up with bumped MSRV + +## [0.4.8] - 2019-07-28 + +### New + +* Support attempting to get `Record` fields as static strings. + +## [0.4.7] - 2019-07-06 + +### New + +* Support for embedded environments with thread-unsafe initialization. +* Initial unstable support for capturing structured data under the `kv_unstable` +feature gate. This new API doesn't affect existing users and may change in future +patches (so those changes may not appear in the changelog until it stabilizes). + +### Improved + +* Docs for using `log` with the 2018 edition. +* Error messages for macros missing arguments. + +## [0.4.6] - 2018-10-27 + +### Improved + +* Support 2018-style macro import for the `log_enabled!` macro. + +## [0.4.5] - 2018-09-03 + +### Improved + +* Make `log`'s internal helper macros less likely to conflict with user-defined + macros. + +## [0.4.4] - 2018-08-17 + +### Improved + +* Support 2018-style imports of the log macros. + +## [0.4.3] - 2018-06-29 + +### Improved + +* More code generation improvements. + +## [0.4.2] - 2018-06-05 + +### Improved + +* Log invocations now generate less code. + +### Fixed + +* Example Logger implementations now properly set the max log level. + +## [0.4.1] - 2017-12-30 + +### Fixed + +* Some doc links were fixed. + +## [0.4.0] - 2017-12-24 + +The changes in this release include cleanup of some obscure functionality and a more robust public +API designed to support bridges to other logging systems, and provide more flexibility to new +features in the future. 
+ +### Compatibility + +Vast portions of the Rust ecosystem use the 0.3.x release series of log, and we don't want to force +the community to go through the pain of upgrading every crate to 0.4.x at the exact same time. Along +with 0.4.0, we've published a new 0.3.9 release which acts as a "shim" over 0.4.0. This will allow +crates using either version to coexist without losing messages from one side or the other. + +There is one caveat - a log message generated by a crate using 0.4.x but consumed by a logging +implementation using 0.3.x will not have a file name or module path. Applications affected by this +can upgrade their logging implementations to one using 0.4.x to avoid losing this information. The +other direction does not lose any information, fortunately! + +**TL;DR** Libraries should feel comfortable upgrading to 0.4.0 without treating that as a breaking +change. Applications may need to update their logging implementation (e.g. env-logger) to a newer +version using log 0.4.x to avoid losing module and file information. + +### New + +* The crate is now `no_std` by default. +* `Level` and `LevelFilter` now implement `Serialize` and `Deserialize` when the `serde` feature is + enabled. +* The `Record` and `Metadata` types can now be constructed by third-party code via a builder API. +* The `logger` free function returns a reference to the logger implementation. This, along with the + ability to construct `Record`s, makes it possible to bridge from another logging framework to + this one without digging into the private internals of the crate. The standard `error!` `warn!`, + etc, macros now exclusively use the public API of the crate rather than "secret" internal APIs. +* `Log::flush` has been added to allow crates to tell the logging implementation to ensure that all + "in flight" log events have been persisted. This can be used, for example, just before an + application exits to ensure that asynchronous log sinks finish their work. + +### Removed + +* The `shutdown` and `shutdown_raw` functions have been removed. Supporting shutdown significantly + complicated the implementation and imposed a performance cost on each logging operation. +* The `log_panics` function and its associated `nightly` Cargo feature have been removed. Use the + [log-panics](https://crates.io/crates/log-panics) instead. + +### Changed + +* The `Log` prefix has been removed from type names. For example, `LogLevelFilter` is now + `LevelFilter`, and `LogRecord` is now `Record`. +* The `MaxLogLevelFilter` object has been removed in favor of a `set_max_level` free function. +* The `set_logger` free functions have been restructured. The logger is now directly passed to the + functions rather than a closure which returns the logger. `set_logger` now takes a `&'static + Log` and is usable in `no_std` contexts in place of the old `set_logger_raw`. `set_boxed_logger` + is a convenience function which takes a `Box` but otherwise acts like `set_logger`. It + requires the `std` feature. +* The `file` and `module_path` values in `Record` no longer have the `'static` lifetime to support + integration with other logging frameworks that don't provide a `'static` lifetime for the + equivalent values. +* The `file`, `line`, and `module_path` values in `Record` are now `Option`s to support integration + with other logging frameworks that don't provide those values. 
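The "Changed" entries above describe the reworked logger-installation API (`set_logger` taking a `&'static Log`, the `set_max_level` free function, and the new `Log::flush` method). As a hedged illustration that is not taken from the diff, the sketch below shows a minimal logger written against that API; `StderrLogger` and the `init` helper are hypothetical names chosen for the example.

```rust
use log::{Level, LevelFilter, Log, Metadata, Record, SetLoggerError};

// Hypothetical logger used only to illustrate the 0.4.x `Log` trait surface.
struct StderrLogger;

impl Log for StderrLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Info
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            eprintln!("{} - {}", record.level(), record.args());
        }
    }

    // Nothing is buffered here, so flush has nothing to do.
    fn flush(&self) {}
}

static LOGGER: StderrLogger = StderrLogger;

fn init() -> Result<(), SetLoggerError> {
    // `set_logger` takes a `&'static` logger; `set_max_level` is the free
    // function that replaced the old `MaxLogLevelFilter` object.
    log::set_logger(&LOGGER)?;
    log::set_max_level(LevelFilter::Info);
    Ok(())
}
```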
+ +### In the Future + +* We're looking to add support for *structured* logging - the inclusion of extra key-value pairs of + information in a log event in addition to the normal string message. This should be able to be + added in a backwards compatible manner to the 0.4.x series when the design is worked out. + +## Older + +Look at the [release tags] for information about older releases. + +[Unreleased]: https://github.com/rust-lang-nursery/log/compare/0.4.11...HEAD +[0.4.11]: https://github.com/rust-lang-nursery/log/compare/0.4.10...0.4.11 +[0.4.10]: https://github.com/rust-lang-nursery/log/compare/0.4.9...0.4.10 +[0.4.9]: https://github.com/rust-lang-nursery/log/compare/0.4.8...0.4.9 +[0.4.8]: https://github.com/rust-lang-nursery/log/compare/0.4.7...0.4.8 +[0.4.7]: https://github.com/rust-lang-nursery/log/compare/0.4.6...0.4.7 +[0.4.6]: https://github.com/rust-lang-nursery/log/compare/0.4.5...0.4.6 +[0.4.5]: https://github.com/rust-lang-nursery/log/compare/0.4.4...0.4.5 +[0.4.4]: https://github.com/rust-lang-nursery/log/compare/0.4.3...0.4.4 +[0.4.3]: https://github.com/rust-lang-nursery/log/compare/0.4.2...0.4.3 +[0.4.2]: https://github.com/rust-lang-nursery/log/compare/0.4.1...0.4.2 +[0.4.1]: https://github.com/rust-lang-nursery/log/compare/0.4.0...0.4.1 +[0.4.0]: https://github.com/rust-lang-nursery/log/compare/0.3.8...0.4.0 +[release tags]: https://github.com/rust-lang-nursery/log/releases diff -Nru cargo-0.44.1/vendor/log/debian/patches/ignore-sval.diff cargo-0.47.0/vendor/log/debian/patches/ignore-sval.diff --- cargo-0.44.1/vendor/log/debian/patches/ignore-sval.diff 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/debian/patches/ignore-sval.diff 2020-10-01 21:38:28.000000000 +0000 @@ -1,6 +1,8 @@ ---- a/Cargo.toml 2020-02-21 15:24:20.134369814 -0500 -+++ b/Cargo.toml 2020-02-21 15:24:47.854425168 -0500 -@@ -24,7 +24,7 @@ +Index: log/Cargo.toml +=================================================================== +--- log.orig/Cargo.toml ++++ log/Cargo.toml +@@ -24,7 +24,7 @@ categories = ["development-tools::debugg license = "MIT OR Apache-2.0" repository = "https://github.com/rust-lang/log" [package.metadata.docs.rs] @@ -9,24 +11,35 @@ [[test]] name = "filters" -@@ -37,20 +37,11 @@ +@@ -41,20 +41,20 @@ version = "1.0" optional = true default-features = false -[dependencies.sval] --version = "0.4.2" +-version = "0.5.2" -optional = true -default-features = false - [dev-dependencies.serde_test] - version = "1.0" - +-[dev-dependencies.serde_test] +-version = "1.0" +- -[dev-dependencies.sval] --version = "0.4.2" +-version = "0.5.2" -features = ["test"] -- ++# [dependencies.sval] ++# version = "0.5.2" ++# optional = true ++# default-features = false ++# [dev-dependencies.serde_test] ++# version = "1.0" ++ ++# [dev-dependencies.sval] ++# version = "0.5.2" ++# features = ["test"] + [features] kv_unstable = [] -kv_unstable_sval = ["kv_unstable", "sval/fmt"] ++kv_unstable_sval = ["kv_unstable"]#, "sval/fmt"] max_level_debug = [] max_level_error = [] max_level_info = [] diff -Nru cargo-0.44.1/vendor/log/LICENSE-APACHE cargo-0.47.0/vendor/log/LICENSE-APACHE --- cargo-0.44.1/vendor/log/LICENSE-APACHE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/LICENSE-APACHE 2020-10-01 21:38:28.000000000 +0000 @@ -1,201 +1,201 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru cargo-0.44.1/vendor/log/LICENSE-MIT cargo-0.47.0/vendor/log/LICENSE-MIT --- cargo-0.44.1/vendor/log/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/LICENSE-MIT 2020-10-01 21:38:28.000000000 +0000 @@ -1,25 +1,25 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/log/.pc/ignore-sval.diff/Cargo.toml cargo-0.47.0/vendor/log/.pc/ignore-sval.diff/Cargo.toml --- cargo-0.44.1/vendor/log/.pc/ignore-sval.diff/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/.pc/ignore-sval.diff/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "log" -version = "0.4.8" +version = "0.4.11" authors = ["The Rust Project Developers"] build = "build.rs" exclude = ["rfcs/**/*", "/.travis.yml", "/appveyor.yml"] @@ -29,6 +29,10 @@ [[test]] name = "filters" harness = false + +[[test]] +name = "macros" +harness = true [dependencies.cfg-if] version = "0.1.2" @@ -38,14 +42,14 @@ default-features = false [dependencies.sval] -version = "0.4.2" +version = "0.5.2" optional = true default-features = false [dev-dependencies.serde_test] version = "1.0" [dev-dependencies.sval] -version = "0.4.2" +version = "0.5.2" features = ["test"] [features] @@ -64,8 +68,3 @@ release_max_level_trace = [] release_max_level_warn = [] std = [] -[badges.appveyor] -repository = "alexcrichton/log" - -[badges.travis-ci] -repository = "rust-lang-nursery/log" diff -Nru cargo-0.44.1/vendor/log/README.md cargo-0.47.0/vendor/log/README.md --- cargo-0.44.1/vendor/log/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,86 +1,86 @@ -log -=== - -A Rust library providing a lightweight logging *facade*. - -[![Build Status](https://travis-ci.com/rust-lang-nursery/log.svg?branch=master)](https://travis-ci.com/rust-lang-nursery/log) -[![Build status](https://ci.appveyor.com/api/projects/status/nopdjmmjt45xcrki?svg=true)](https://ci.appveyor.com/project/alexcrichton/log) -[![Latest version](https://img.shields.io/crates/v/log.svg)](https://crates.io/crates/log) -[![Documentation](https://docs.rs/log/badge.svg)](https://docs.rs/log) -![License](https://img.shields.io/crates/l/log.svg) - -* [`log` documentation](https://docs.rs/log) - -A logging facade provides a single logging API that abstracts over the actual -logging implementation. Libraries can use the logging API provided by this -crate, and the consumer of those libraries can choose the logging -implementation that is most suitable for its use case. - - -## Minimum supported `rustc` - -`1.16.0+` - -This version is explicitly tested in CI and may be bumped in any release as needed. Maintaining compatibility with older compilers is a priority though, so the bar for bumping the minimum supported version is set very high. Any changes to the supported minimum version will be called out in the release notes. - -## Usage - -## In libraries - -Libraries should link only to the `log` crate, and use the provided macros to -log whatever information will be useful to downstream consumers: - -```toml -[dependencies] -log = "0.4" -``` - -```rust -use log::{info, trace, warn}; - -pub fn shave_the_yak(yak: &mut Yak) { - trace!("Commencing yak shaving"); - - loop { - match find_a_razor() { - Ok(razor) => { - info!("Razor located: {}", razor); - yak.shave(razor); - break; - } - Err(err) => { - warn!("Unable to locate a razor: {}, retrying", err); - } - } - } -} -``` - -## In executables - -In order to produce log output, executables have to use a logger implementation compatible with the facade. 
-There are many available implementations to chose from, here are some of the most popular ones: - -* Simple minimal loggers: - * [`env_logger`](https://docs.rs/env_logger/*/env_logger/) - * [`simple_logger`](https://github.com/borntyping/rust-simple_logger) - * [`simplelog`](https://github.com/drakulix/simplelog.rs) - * [`pretty_env_logger`](https://docs.rs/pretty_env_logger/*/pretty_env_logger/) - * [`stderrlog`](https://docs.rs/stderrlog/*/stderrlog/) - * [`flexi_logger`](https://docs.rs/flexi_logger/*/flexi_logger/) -* Complex configurable frameworks: - * [`log4rs`](https://docs.rs/log4rs/*/log4rs/) - * [`fern`](https://docs.rs/fern/*/fern/) -* Adaptors for other facilities: - * [`syslog`](https://docs.rs/syslog/*/syslog/) - * [`slog-stdlog`](https://docs.rs/slog-stdlog/*/slog_stdlog/) - * [`android_log`](https://docs.rs/android_log/*/android_log/) -* For WebAssembly binaries: - * [`console_log`](https://docs.rs/console_log/*/console_log/) - -Executables should choose a logger implementation and initialize it early in the -runtime of the program. Logger implementations will typically include a -function to do this. Any log messages generated before the logger is -initialized will be ignored. - -The executable itself may use the `log` crate to log as well. +log +=== + +A Rust library providing a lightweight logging *facade*. + +[![Build status](https://img.shields.io/github/workflow/status/rust-lang/log/CI/master)](https://github.com/rust-lang/log/actions) +[![Latest version](https://img.shields.io/crates/v/log.svg)](https://crates.io/crates/log) +[![Documentation](https://docs.rs/log/badge.svg)](https://docs.rs/log) +![License](https://img.shields.io/crates/l/log.svg) + +* [`log` documentation](https://docs.rs/log) + +A logging facade provides a single logging API that abstracts over the actual +logging implementation. Libraries can use the logging API provided by this +crate, and the consumer of those libraries can choose the logging +implementation that is most suitable for its use case. + + +## Minimum supported `rustc` + +`1.31.0+` + +This version is explicitly tested in CI and may be bumped in any release as needed. Maintaining compatibility with older compilers is a priority though, so the bar for bumping the minimum supported version is set very high. Any changes to the supported minimum version will be called out in the release notes. + +## Usage + +## In libraries + +Libraries should link only to the `log` crate, and use the provided macros to +log whatever information will be useful to downstream consumers: + +```toml +[dependencies] +log = "0.4" +``` + +```rust +use log::{info, trace, warn}; + +pub fn shave_the_yak(yak: &mut Yak) { + trace!("Commencing yak shaving"); + + loop { + match find_a_razor() { + Ok(razor) => { + info!("Razor located: {}", razor); + yak.shave(razor); + break; + } + Err(err) => { + warn!("Unable to locate a razor: {}, retrying", err); + } + } + } +} +``` + +## In executables + +In order to produce log output, executables have to use a logger implementation compatible with the facade. 
+There are many available implementations to choose from, here are some of the most popular ones: + +* Simple minimal loggers: + * [`env_logger`](https://docs.rs/env_logger/*/env_logger/) + * [`simple_logger`](https://github.com/borntyping/rust-simple_logger) + * [`simplelog`](https://github.com/drakulix/simplelog.rs) + * [`pretty_env_logger`](https://docs.rs/pretty_env_logger/*/pretty_env_logger/) + * [`stderrlog`](https://docs.rs/stderrlog/*/stderrlog/) + * [`flexi_logger`](https://docs.rs/flexi_logger/*/flexi_logger/) +* Complex configurable frameworks: + * [`log4rs`](https://docs.rs/log4rs/*/log4rs/) + * [`fern`](https://docs.rs/fern/*/fern/) +* Adaptors for other facilities: + * [`syslog`](https://docs.rs/syslog/*/syslog/) + * [`slog-stdlog`](https://docs.rs/slog-stdlog/*/slog_stdlog/) + * [`android_log`](https://docs.rs/android_log/*/android_log/) + * [`win_dbg_logger`](https://docs.rs/win_dbg_logger/*/win_dbg_logger/) +* For WebAssembly binaries: + * [`console_log`](https://docs.rs/console_log/*/console_log/) + +Executables should choose a logger implementation and initialize it early in the +runtime of the program. Logger implementations will typically include a +function to do this. Any log messages generated before the logger is +initialized will be ignored. + +The executable itself may use the `log` crate to log as well. diff -Nru cargo-0.44.1/vendor/log/src/kv/error.rs cargo-0.47.0/vendor/log/src/kv/error.rs --- cargo-0.44.1/vendor/log/src/kv/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,88 +1,70 @@ -use std::fmt; - -/// An error encountered while working with structured data. -#[derive(Debug)] -pub struct Error { - inner: Inner -} - -#[derive(Debug)] -enum Inner { - #[cfg(feature = "std")] - Boxed(std_support::BoxedError), - Msg(&'static str), - Fmt, -} - -impl Error { - /// Create an error from a message. - pub fn msg(msg: &'static str) -> Self { - Error { - inner: Inner::Msg(msg), - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Inner::*; - match &self.inner { - #[cfg(feature = "std")] - &Boxed(ref err) => err.fmt(f), - &Msg(ref msg) => msg.fmt(f), - &Fmt => fmt::Error.fmt(f), - } - } -} - -impl From for Error { - fn from(_: fmt::Error) -> Self { - Error { - inner: Inner::Fmt, - } - } -} - -impl From for fmt::Error { - fn from(_: Error) -> Self { - fmt::Error - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - use std::{error, io}; - - pub(super) type BoxedError = Box; - - impl Error { - /// Create an error from a standard error type. - pub fn boxed(err: E) -> Self - where - E: Into, - { - Error { - inner: Inner::Boxed(err.into()) - } - } - } - - impl error::Error for Error { - fn description(&self) -> &str { - "key values error" - } - } - - impl From for Error { - fn from(err: io::Error) -> Self { - Error::boxed(err) - } - } - - impl From for io::Error { - fn from(err: Error) -> Self { - io::Error::new(io::ErrorKind::Other, err) - } - } -} +use std::fmt; + +/// An error encountered while working with structured data. +#[derive(Debug)] +pub struct Error { + inner: Inner, +} + +#[derive(Debug)] +enum Inner { + #[cfg(feature = "std")] + Boxed(std_support::BoxedError), + Msg(&'static str), + Fmt, +} + +impl Error { + /// Create an error from a message. 
+ pub fn msg(msg: &'static str) -> Self { + Error { + inner: Inner::Msg(msg), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Inner::*; + match &self.inner { + #[cfg(feature = "std")] + &Boxed(ref err) => err.fmt(f), + &Msg(ref msg) => msg.fmt(f), + &Fmt => fmt::Error.fmt(f), + } + } +} + +impl From for Error { + fn from(_: fmt::Error) -> Self { + Error { inner: Inner::Fmt } + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use std::{error, io}; + + pub(super) type BoxedError = Box; + + impl Error { + /// Create an error from a standard error type. + pub fn boxed(err: E) -> Self + where + E: Into, + { + Error { + inner: Inner::Boxed(err.into()), + } + } + } + + impl error::Error for Error {} + + impl From for Error { + fn from(err: io::Error) -> Self { + Error::boxed(err) + } + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/key.rs cargo-0.47.0/vendor/log/src/kv/key.rs --- cargo-0.44.1/vendor/log/src/kv/key.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/key.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,143 +1,139 @@ -//! Structured keys. - -use std::fmt; -use std::cmp; -use std::hash; -use std::borrow::Borrow; - -/// A type that can be converted into a [`Key`](struct.Key.html). -pub trait ToKey { - /// Perform the conversion. - fn to_key(&self) -> Key; -} - -impl<'a, T> ToKey for &'a T -where - T: ToKey + ?Sized, -{ - fn to_key(&self) -> Key { - (**self).to_key() - } -} - -impl<'k> ToKey for Key<'k> { - fn to_key(&self) -> Key { - Key { - key: self.key, - } - } -} - -impl ToKey for str { - fn to_key(&self) -> Key { - Key::from_str(self) - } -} - -/// A key in a structured key-value pair. -#[derive(Clone)] -pub struct Key<'k> { - key: &'k str, -} - -impl<'k> Key<'k> { - /// Get a key from a borrowed string. - pub fn from_str(key: &'k str) -> Self { - Key { - key: key, - } - } - - /// Get a borrowed string from this key. - pub fn as_str(&self) -> &str { - self.key - } -} - -impl<'k> fmt::Debug for Key<'k> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.key.fmt(f) - } -} - -impl<'k> fmt::Display for Key<'k> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.key.fmt(f) - } -} - -impl<'k> hash::Hash for Key<'k> { - fn hash(&self, state: &mut H) - where - H: hash::Hasher, - { - self.as_str().hash(state) - } -} - -impl<'k, 'ko> PartialEq> for Key<'k> { - fn eq(&self, other: &Key<'ko>) -> bool { - self.as_str().eq(other.as_str()) - } -} - -impl<'k> Eq for Key<'k> {} - -impl<'k, 'ko> PartialOrd> for Key<'k> { - fn partial_cmp(&self, other: &Key<'ko>) -> Option { - self.as_str().partial_cmp(other.as_str()) - } -} - -impl<'k> Ord for Key<'k> { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.as_str().cmp(other.as_str()) - } -} - -impl<'k> AsRef for Key<'k> { - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl<'k> Borrow for Key<'k> { - fn borrow(&self) -> &str { - self.as_str() - } -} - -impl<'k> From<&'k str> for Key<'k> { - fn from(s: &'k str) -> Self { - Key::from_str(s) - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - - use std::borrow::Cow; - - impl ToKey for String { - fn to_key(&self) -> Key { - Key::from_str(self) - } - } - - impl<'a> ToKey for Cow<'a, str> { - fn to_key(&self) -> Key { - Key::from_str(self) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn key_from_string() { - assert_eq!("a key", Key::from_str("a key").as_str()); - } -} +//! Structured keys. 
+ +use std::borrow::Borrow; +use std::cmp; +use std::fmt; +use std::hash; + +/// A type that can be converted into a [`Key`](struct.Key.html). +pub trait ToKey { + /// Perform the conversion. + fn to_key(&self) -> Key; +} + +impl<'a, T> ToKey for &'a T +where + T: ToKey + ?Sized, +{ + fn to_key(&self) -> Key { + (**self).to_key() + } +} + +impl<'k> ToKey for Key<'k> { + fn to_key(&self) -> Key { + Key { key: self.key } + } +} + +impl ToKey for str { + fn to_key(&self) -> Key { + Key::from_str(self) + } +} + +/// A key in a structured key-value pair. +#[derive(Clone)] +pub struct Key<'k> { + key: &'k str, +} + +impl<'k> Key<'k> { + /// Get a key from a borrowed string. + pub fn from_str(key: &'k str) -> Self { + Key { key: key } + } + + /// Get a borrowed string from this key. + pub fn as_str(&self) -> &str { + self.key + } +} + +impl<'k> fmt::Debug for Key<'k> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.key.fmt(f) + } +} + +impl<'k> fmt::Display for Key<'k> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.key.fmt(f) + } +} + +impl<'k> hash::Hash for Key<'k> { + fn hash(&self, state: &mut H) + where + H: hash::Hasher, + { + self.as_str().hash(state) + } +} + +impl<'k, 'ko> PartialEq> for Key<'k> { + fn eq(&self, other: &Key<'ko>) -> bool { + self.as_str().eq(other.as_str()) + } +} + +impl<'k> Eq for Key<'k> {} + +impl<'k, 'ko> PartialOrd> for Key<'k> { + fn partial_cmp(&self, other: &Key<'ko>) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +impl<'k> Ord for Key<'k> { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.as_str().cmp(other.as_str()) + } +} + +impl<'k> AsRef for Key<'k> { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl<'k> Borrow for Key<'k> { + fn borrow(&self) -> &str { + self.as_str() + } +} + +impl<'k> From<&'k str> for Key<'k> { + fn from(s: &'k str) -> Self { + Key::from_str(s) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use std::borrow::Cow; + + impl ToKey for String { + fn to_key(&self) -> Key { + Key::from_str(self) + } + } + + impl<'a> ToKey for Cow<'a, str> { + fn to_key(&self) -> Key { + Key::from_str(self) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn key_from_string() { + assert_eq!("a key", Key::from_str("a key").as_str()); + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/mod.rs cargo-0.47.0/vendor/log/src/kv/mod.rs --- cargo-0.44.1/vendor/log/src/kv/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,23 +1,26 @@ -//! **UNSTABLE:** Structured key-value pairs. -//! -//! This module is unstable and breaking changes may be made -//! at any time. See [the tracking issue](https://github.com/rust-lang-nursery/log/issues/328) -//! for more details. -//! -//! Add the `kv_unstable` feature to your `Cargo.toml` to enable -//! this module: -//! -//! ```toml -//! [dependencies.log] -//! features = ["kv_unstable"] -//! ``` - -mod error; -mod source; -mod key; -pub mod value; - -pub use self::error::Error; -pub use self::source::{Source, Visitor}; -pub use self::key::{Key, ToKey}; -pub use self::value::{Value, ToValue}; +//! **UNSTABLE:** Structured key-value pairs. +//! +//! This module is unstable and breaking changes may be made +//! at any time. See [the tracking issue](https://github.com/rust-lang-nursery/log/issues/328) +//! for more details. +//! +//! Add the `kv_unstable` feature to your `Cargo.toml` to enable +//! this module: +//! +//! ```toml +//! 
[dependencies.log] +//! features = ["kv_unstable"] +//! ``` + +mod error; +mod key; +mod source; + +pub mod value; + +pub use self::error::Error; +pub use self::key::{Key, ToKey}; +pub use self::source::{Source, Visitor}; + +#[doc(inline)] +pub use self::value::{ToValue, Value}; diff -Nru cargo-0.44.1/vendor/log/src/kv/source.rs cargo-0.47.0/vendor/log/src/kv/source.rs --- cargo-0.44.1/vendor/log/src/kv/source.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/source.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,396 +1,393 @@ -//! Sources for key-value pairs. - -use std::fmt; -use kv::{Error, Key, ToKey, Value, ToValue}; - -/// A source of key-value pairs. -/// -/// The source may be a single pair, a set of pairs, or a filter over a set of pairs. -/// Use the [`Visitor`](trait.Visitor.html) trait to inspect the structured data -/// in a source. -pub trait Source { - /// Visit key-value pairs. - /// - /// A source doesn't have to guarantee any ordering or uniqueness of key-value pairs. - /// If the given visitor returns an error then the source may early-return with it, - /// even if there are more key-value pairs. - /// - /// # Implementation notes - /// - /// A source should yield the same key-value pairs to a subsequent visitor unless - /// that visitor itself fails. - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error>; - - /// Get the value for a given key. - /// - /// If the key appears multiple times in the source then which key is returned - /// is implementation specific. - /// - /// # Implementation notes - /// - /// A source that can provide a more efficient implementation of this method - /// should override it. - fn get<'v>(&'v self, key: Key) -> Option> { - struct Get<'k, 'v> { - key: Key<'k>, - found: Option>, - } - - impl<'k, 'kvs> Visitor<'kvs> for Get<'k, 'kvs> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - if self.key == key { - self.found = Some(value); - } - - Ok(()) - } - } - - let mut get = Get { - key, - found: None, - }; - - let _ = self.visit(&mut get); - get.found - } - - /// Count the number of key-value pairs that can be visited. - /// - /// # Implementation notes - /// - /// A source that knows the number of key-value pairs upfront may provide a more - /// efficient implementation. - /// - /// A subsequent call to `visit` should yield the same number of key-value pairs - /// to the visitor, unless that visitor fails part way through. 
- fn count(&self) -> usize { - struct Count(usize); - - impl<'kvs> Visitor<'kvs> for Count { - fn visit_pair(&mut self, _: Key<'kvs>, _: Value<'kvs>) -> Result<(), Error> { - self.0 += 1; - - Ok(()) - } - } - - let mut count = Count(0); - let _ = self.visit(&mut count); - count.0 - } -} - -impl<'a, T> Source for &'a T -where - T: Source + ?Sized, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get<'v>(&'v self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } -} - -impl Source for (K, V) -where - K: ToKey, - V: ToValue, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - visitor.visit_pair(self.0.to_key(), self.1.to_value()) - } - - fn get<'v>(&'v self, key: Key) -> Option> { - if self.0.to_key() == key { - Some(self.1.to_value()) - } else { - None - } - } - - fn count(&self) -> usize { - 1 - } -} - -impl Source for [S] -where - S: Source, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - for source in self { - source.visit(visitor)?; - } - - Ok(()) - } - - fn count(&self) -> usize { - self.len() - } -} - -impl Source for Option -where - S: Source, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - if let Some(ref source) = *self { - source.visit(visitor)?; - } - - Ok(()) - } - - fn count(&self) -> usize { - self.as_ref().map(Source::count).unwrap_or(0) - } -} - -/// A visitor for the key-value pairs in a [`Source`](trait.Source.html). -pub trait Visitor<'kvs> { - /// Visit a key-value pair. - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error>; -} - -impl<'a, 'kvs, T> Visitor<'kvs> for &'a mut T -where - T: Visitor<'kvs> + ?Sized, -{ - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - (**self).visit_pair(key, value) - } -} - -impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugMap<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.entry(&key, &value); - Ok(()) - } -} - -impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugList<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.entry(&(key, value)); - Ok(()) - } -} - -impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugSet<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.entry(&(key, value)); - Ok(()) - } -} - -impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugTuple<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.field(&key); - self.field(&value); - Ok(()) - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - use std::borrow::Borrow; - use std::collections::{BTreeMap, HashMap}; - use std::hash::{BuildHasher, Hash}; - - impl Source for Box - where - S: Source + ?Sized, - { - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get<'v>(&'v self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } - } - - impl Source for Vec - where - S: Source, - { - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get<'v>(&'v self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - 
Source::count(&**self) - } - } - - impl<'kvs, V> Visitor<'kvs> for Box - where - V: Visitor<'kvs> + ?Sized, - { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - (**self).visit_pair(key, value) - } - } - - impl Source for HashMap - where - K: ToKey + Borrow + Eq + Hash, - V: ToValue, - S: BuildHasher, - { - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - for (key, value) in self { - visitor.visit_pair(key.to_key(), value.to_value())?; - } - Ok(()) - } - - fn get<'v>(&'v self, key: Key) -> Option> { - HashMap::get(self, key.as_str()).map(|v| v.to_value()) - } - - fn count(&self) -> usize { - self.len() - } - } - - impl Source for BTreeMap - where - K: ToKey + Borrow + Ord, - V: ToValue, - { - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - for (key, value) in self { - visitor.visit_pair(key.to_key(), value.to_value())?; - } - Ok(()) - } - - fn get<'v>(&'v self, key: Key) -> Option> { - BTreeMap::get(self, key.as_str()).map(|v| v.to_value()) - } - - fn count(&self) -> usize { - self.len() - } - } - - #[cfg(test)] - mod tests { - use super::*; - use kv::value::test::Token; - use std::collections::{BTreeMap, HashMap}; - - #[test] - fn count() { - assert_eq!(1, Source::count(&Box::new(("a", 1)))); - assert_eq!(2, Source::count(&vec![("a", 1), ("b", 2)])); - } - - #[test] - fn get() { - let source = vec![("a", 1), ("b", 2), ("a", 1)]; - assert_eq!( - Token::I64(1), - Source::get(&source, Key::from_str("a")).unwrap().to_token() - ); - - let source = Box::new(Option::None::<(&str, i32)>); - assert!(Source::get(&source, Key::from_str("a")).is_none()); - } - - #[test] - fn hash_map() { - let mut map = HashMap::new(); - map.insert("a", 1); - map.insert("b", 2); - - assert_eq!(2, Source::count(&map)); - assert_eq!( - Token::I64(1), - Source::get(&map, Key::from_str("a")).unwrap().to_token() - ); - } - - #[test] - fn btree_map() { - let mut map = BTreeMap::new(); - map.insert("a", 1); - map.insert("b", 2); - - assert_eq!(2, Source::count(&map)); - assert_eq!( - Token::I64(1), - Source::get(&map, Key::from_str("a")).unwrap().to_token() - ); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use kv::value::test::Token; - - #[test] - fn source_is_object_safe() { - fn _check(_: &Source) {} - } - - #[test] - fn visitor_is_object_safe() { - fn _check(_: &Visitor) {} - } - - #[test] - fn count() { - struct OnePair { - key: &'static str, - value: i32, - } - - impl Source for OnePair { - fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { - visitor.visit_pair(self.key.to_key(), self.value.to_value()) - } - } - - assert_eq!(1, Source::count(&("a", 1))); - assert_eq!(2, Source::count(&[("a", 1), ("b", 2)] as &[_])); - assert_eq!(0, Source::count(&Option::None::<(&str, i32)>)); - assert_eq!(1, Source::count(&OnePair { key: "a", value: 1 })); - } - - #[test] - fn get() { - let source = &[("a", 1), ("b", 2), ("a", 1)] as &[_]; - assert_eq!( - Token::I64(1), - Source::get(source, Key::from_str("a")).unwrap().to_token() - ); - assert_eq!( - Token::I64(2), - Source::get(source, Key::from_str("b")).unwrap().to_token() - ); - assert!(Source::get(&source, Key::from_str("c")).is_none()); - - let source = Option::None::<(&str, i32)>; - assert!(Source::get(&source, Key::from_str("a")).is_none()); - } -} +//! Sources for key-value pairs. + +use kv::{Error, Key, ToKey, ToValue, Value}; +use std::fmt; + +/// A source of key-value pairs. 
+/// +/// The source may be a single pair, a set of pairs, or a filter over a set of pairs. +/// Use the [`Visitor`](trait.Visitor.html) trait to inspect the structured data +/// in a source. +pub trait Source { + /// Visit key-value pairs. + /// + /// A source doesn't have to guarantee any ordering or uniqueness of key-value pairs. + /// If the given visitor returns an error then the source may early-return with it, + /// even if there are more key-value pairs. + /// + /// # Implementation notes + /// + /// A source should yield the same key-value pairs to a subsequent visitor unless + /// that visitor itself fails. + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error>; + + /// Get the value for a given key. + /// + /// If the key appears multiple times in the source then which key is returned + /// is implementation specific. + /// + /// # Implementation notes + /// + /// A source that can provide a more efficient implementation of this method + /// should override it. + fn get<'v>(&'v self, key: Key) -> Option> { + struct Get<'k, 'v> { + key: Key<'k>, + found: Option>, + } + + impl<'k, 'kvs> Visitor<'kvs> for Get<'k, 'kvs> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + if self.key == key { + self.found = Some(value); + } + + Ok(()) + } + } + + let mut get = Get { key, found: None }; + + let _ = self.visit(&mut get); + get.found + } + + /// Count the number of key-value pairs that can be visited. + /// + /// # Implementation notes + /// + /// A source that knows the number of key-value pairs upfront may provide a more + /// efficient implementation. + /// + /// A subsequent call to `visit` should yield the same number of key-value pairs + /// to the visitor, unless that visitor fails part way through. + fn count(&self) -> usize { + struct Count(usize); + + impl<'kvs> Visitor<'kvs> for Count { + fn visit_pair(&mut self, _: Key<'kvs>, _: Value<'kvs>) -> Result<(), Error> { + self.0 += 1; + + Ok(()) + } + } + + let mut count = Count(0); + let _ = self.visit(&mut count); + count.0 + } +} + +impl<'a, T> Source for &'a T +where + T: Source + ?Sized, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } +} + +impl Source for (K, V) +where + K: ToKey, + V: ToValue, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + visitor.visit_pair(self.0.to_key(), self.1.to_value()) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + if self.0.to_key() == key { + Some(self.1.to_value()) + } else { + None + } + } + + fn count(&self) -> usize { + 1 + } +} + +impl Source for [S] +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + for source in self { + source.visit(visitor)?; + } + + Ok(()) + } + + fn count(&self) -> usize { + self.len() + } +} + +impl Source for Option +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + if let Some(ref source) = *self { + source.visit(visitor)?; + } + + Ok(()) + } + + fn count(&self) -> usize { + self.as_ref().map(Source::count).unwrap_or(0) + } +} + +/// A visitor for the key-value pairs in a [`Source`](trait.Source.html). +pub trait Visitor<'kvs> { + /// Visit a key-value pair. 
+ fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error>; +} + +impl<'a, 'kvs, T> Visitor<'kvs> for &'a mut T +where + T: Visitor<'kvs> + ?Sized, +{ + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + (**self).visit_pair(key, value) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugMap<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&key, &value); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugList<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&(key, value)); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugSet<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&(key, value)); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugTuple<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.field(&key); + self.field(&value); + Ok(()) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use std::borrow::Borrow; + use std::collections::{BTreeMap, HashMap}; + use std::hash::{BuildHasher, Hash}; + + impl Source for Box + where + S: Source + ?Sized, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl Source for Vec + where + S: Source, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl<'kvs, V> Visitor<'kvs> for Box + where + V: Visitor<'kvs> + ?Sized, + { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + (**self).visit_pair(key, value) + } + } + + impl Source for HashMap + where + K: ToKey + Borrow + Eq + Hash, + V: ToValue, + S: BuildHasher, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + for (key, value) in self { + visitor.visit_pair(key.to_key(), value.to_value())?; + } + Ok(()) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + HashMap::get(self, key.as_str()).map(|v| v.to_value()) + } + + fn count(&self) -> usize { + self.len() + } + } + + impl Source for BTreeMap + where + K: ToKey + Borrow + Ord, + V: ToValue, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + for (key, value) in self { + visitor.visit_pair(key.to_key(), value.to_value())?; + } + Ok(()) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + BTreeMap::get(self, key.as_str()).map(|v| v.to_value()) + } + + fn count(&self) -> usize { + self.len() + } + } + + #[cfg(test)] + mod tests { + use super::*; + use kv::value::test::Token; + use std::collections::{BTreeMap, HashMap}; + + #[test] + fn count() { + assert_eq!(1, Source::count(&Box::new(("a", 1)))); + assert_eq!(2, Source::count(&vec![("a", 1), ("b", 2)])); + } + + #[test] + fn get() { + let source = vec![("a", 1), ("b", 2), ("a", 1)]; + assert_eq!( + Token::I64(1), + Source::get(&source, Key::from_str("a")).unwrap().to_token() + ); + + let source = Box::new(Option::None::<(&str, i32)>); + assert!(Source::get(&source, 
Key::from_str("a")).is_none()); + } + + #[test] + fn hash_map() { + let mut map = HashMap::new(); + map.insert("a", 1); + map.insert("b", 2); + + assert_eq!(2, Source::count(&map)); + assert_eq!( + Token::I64(1), + Source::get(&map, Key::from_str("a")).unwrap().to_token() + ); + } + + #[test] + fn btree_map() { + let mut map = BTreeMap::new(); + map.insert("a", 1); + map.insert("b", 2); + + assert_eq!(2, Source::count(&map)); + assert_eq!( + Token::I64(1), + Source::get(&map, Key::from_str("a")).unwrap().to_token() + ); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kv::value::test::Token; + + #[test] + fn source_is_object_safe() { + fn _check(_: &dyn Source) {} + } + + #[test] + fn visitor_is_object_safe() { + fn _check(_: &dyn Visitor) {} + } + + #[test] + fn count() { + struct OnePair { + key: &'static str, + value: i32, + } + + impl Source for OnePair { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn Visitor<'kvs>) -> Result<(), Error> { + visitor.visit_pair(self.key.to_key(), self.value.to_value()) + } + } + + assert_eq!(1, Source::count(&("a", 1))); + assert_eq!(2, Source::count(&[("a", 1), ("b", 2)] as &[_])); + assert_eq!(0, Source::count(&Option::None::<(&str, i32)>)); + assert_eq!(1, Source::count(&OnePair { key: "a", value: 1 })); + } + + #[test] + fn get() { + let source = &[("a", 1), ("b", 2), ("a", 1)] as &[_]; + assert_eq!( + Token::I64(1), + Source::get(source, Key::from_str("a")).unwrap().to_token() + ); + assert_eq!( + Token::I64(2), + Source::get(source, Key::from_str("b")).unwrap().to_token() + ); + assert!(Source::get(&source, Key::from_str("c")).is_none()); + + let source = Option::None::<(&str, i32)>; + assert!(Source::get(&source, Key::from_str("a")).is_none()); + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/fill.rs cargo-0.47.0/vendor/log/src/kv/value/fill.rs --- cargo-0.44.1/vendor/log/src/kv/value/fill.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/fill.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,164 @@ +//! Lazy value initialization. + +use std::fmt; + +use super::internal::{Erased, Inner, Visitor}; +use super::{Error, Value}; + +impl<'v> Value<'v> { + /// Get a value from a fillable slot. + pub fn from_fill(value: &'v T) -> Self + where + T: Fill + 'static, + { + Value { + inner: Inner::Fill(unsafe { Erased::new_unchecked::(value) }), + } + } +} + +/// A type that requires extra work to convert into a [`Value`](struct.Value.html). +/// +/// This trait is a more advanced initialization API than [`ToValue`](trait.ToValue.html). +/// It's intended for erased values coming from other logging frameworks that may need +/// to perform extra work to determine the concrete type to use. +pub trait Fill { + /// Fill a value. + fn fill(&self, slot: &mut Slot) -> Result<(), Error>; +} + +impl<'a, T> Fill for &'a T +where + T: Fill + ?Sized, +{ + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + (**self).fill(slot) + } +} + +/// A value slot to fill using the [`Fill`](trait.Fill.html) trait. 
+pub struct Slot<'s, 'f> { + filled: bool, + visitor: &'s mut dyn Visitor<'f>, +} + +impl<'s, 'f> fmt::Debug for Slot<'s, 'f> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Slot").finish() + } +} + +impl<'s, 'f> Slot<'s, 'f> { + pub(super) fn new(visitor: &'s mut dyn Visitor<'f>) -> Self { + Slot { + visitor, + filled: false, + } + } + + pub(super) fn fill(&mut self, f: F) -> Result<(), Error> + where + F: FnOnce(&mut dyn Visitor<'f>) -> Result<(), Error>, + { + assert!(!self.filled, "the slot has already been filled"); + self.filled = true; + + f(self.visitor) + } + + /// Fill the slot with a value. + /// + /// The given value doesn't need to satisfy any particular lifetime constraints. + /// + /// # Panics + /// + /// Calling more than a single `fill` method on this slot will panic. + pub fn fill_any(&mut self, value: T) -> Result<(), Error> + where + T: Into>, + { + self.fill(|visitor| value.into().inner.visit(visitor)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn fill_value_borrowed() { + struct TestFill; + + impl Fill for TestFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + let dbg: &dyn fmt::Debug = &1; + + slot.fill_debug(&dbg) + } + } + + assert_eq!("1", Value::from_fill(&TestFill).to_string()); + } + + #[test] + fn fill_value_owned() { + struct TestFill; + + impl Fill for TestFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + slot.fill_any("a string") + } + } + } + + #[test] + #[should_panic] + fn fill_multiple_times_panics() { + struct BadFill; + + impl Fill for BadFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + slot.fill_any(42)?; + slot.fill_any(6789)?; + + Ok(()) + } + } + + let _ = Value::from_fill(&BadFill).to_string(); + } + + #[test] + fn fill_cast() { + struct TestFill; + + impl Fill for TestFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + slot.fill_any("a string") + } + } + + assert_eq!( + "a string", + Value::from_fill(&TestFill) + .to_borrowed_str() + .expect("invalid value") + ); + } + + #[test] + fn fill_debug() { + struct TestFill; + + impl Fill for TestFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + slot.fill_any(42u64) + } + } + + assert_eq!( + format!("{:04?}", 42u64), + format!("{:04?}", Value::from_fill(&TestFill)), + ) + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/impls.rs cargo-0.47.0/vendor/log/src/kv/value/impls.rs --- cargo-0.44.1/vendor/log/src/kv/value/impls.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/impls.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,269 +1,159 @@ -use std::fmt; - -use super::{ToValue, Value, Primitive}; - -impl ToValue for usize { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: usize) -> Self { - Value::from_primitive(Primitive::Unsigned(value as u64)) - } -} - -impl ToValue for isize { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: isize) -> Self { - Value::from_primitive(Primitive::Signed(value as i64)) - } -} - -impl ToValue for u8 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: u8) -> Self { - Value::from_primitive(Primitive::Unsigned(value as u64)) - } -} - -impl ToValue for u16 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: u16) -> Self { - Value::from_primitive(Primitive::Unsigned(value as u64)) - } 
-} - -impl ToValue for u32 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: u32) -> Self { - Value::from_primitive(Primitive::Unsigned(value as u64)) - } -} - -impl ToValue for u64 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: u64) -> Self { - Value::from_primitive(Primitive::Unsigned(value)) - } -} - -impl ToValue for i8 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: i8) -> Self { - Value::from_primitive(Primitive::Signed(value as i64)) - } -} - -impl ToValue for i16 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: i16) -> Self { - Value::from_primitive(Primitive::Signed(value as i64)) - } -} - -impl ToValue for i32 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: i32) -> Self { - Value::from_primitive(Primitive::Signed(value as i64)) - } -} - -impl ToValue for i64 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: i64) -> Self { - Value::from_primitive(Primitive::Signed(value)) - } -} - -impl ToValue for f32 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: f32) -> Self { - Value::from_primitive(Primitive::Float(value as f64)) - } -} - -impl ToValue for f64 { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: f64) -> Self { - Value::from_primitive(Primitive::Float(value)) - } -} - -impl ToValue for bool { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: bool) -> Self { - Value::from_primitive(Primitive::Bool(value)) - } -} - -impl ToValue for char { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From for Value<'v> { - fn from(value: char) -> Self { - Value::from_primitive(Primitive::Char(value)) - } -} - -impl<'v> ToValue for &'v str { - fn to_value(&self) -> Value { - Value::from(*self) - } -} - -impl<'v> From<&'v str> for Value<'v> { - fn from(value: &'v str) -> Self { - Value::from_primitive(Primitive::Str(value)) - } -} - -impl ToValue for () { - fn to_value(&self) -> Value { - Value::from_primitive(Primitive::None) - } -} - -impl ToValue for Option -where - T: ToValue, -{ - fn to_value(&self) -> Value { - match *self { - Some(ref value) => value.to_value(), - None => Value::from_primitive(Primitive::None), - } - } -} - -impl<'v> ToValue for fmt::Arguments<'v> { - fn to_value(&self) -> Value { - Value::from_debug(self) - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - - use std::borrow::Cow; - - impl ToValue for Box - where - T: ToValue + ?Sized, - { - fn to_value(&self) -> Value { - (**self).to_value() - } - } - - impl ToValue for String { - fn to_value(&self) -> Value { - Value::from_primitive(Primitive::Str(&*self)) - } - } - - impl<'v> ToValue for Cow<'v, str> { - fn to_value(&self) -> Value { - Value::from_primitive(Primitive::Str(&*self)) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use kv::value::test::Token; - - #[test] - fn test_to_value_display() { - assert_eq!(42u64.to_value().to_string(), "42"); - assert_eq!(42i64.to_value().to_string(), "42"); - assert_eq!(42.01f64.to_value().to_string(), "42.01"); - assert_eq!(true.to_value().to_string(), "true"); - 
assert_eq!('a'.to_value().to_string(), "'a'"); - assert_eq!(format_args!("a {}", "value").to_value().to_string(), "a value"); - assert_eq!("a loong string".to_value().to_string(), "\"a loong string\""); - assert_eq!(Some(true).to_value().to_string(), "true"); - assert_eq!(().to_value().to_string(), "None"); - assert_eq!(Option::None::.to_value().to_string(), "None"); - } - - #[test] - fn test_to_value_structured() { - assert_eq!(42u64.to_value().to_token(), Token::U64(42)); - assert_eq!(42i64.to_value().to_token(), Token::I64(42)); - assert_eq!(42.01f64.to_value().to_token(), Token::F64(42.01)); - assert_eq!(true.to_value().to_token(), Token::Bool(true)); - assert_eq!('a'.to_value().to_token(), Token::Char('a')); - assert_eq!(format_args!("a {}", "value").to_value().to_token(), Token::Str("a value".into())); - assert_eq!("a loong string".to_value().to_token(), Token::Str("a loong string".into())); - assert_eq!(Some(true).to_value().to_token(), Token::Bool(true)); - assert_eq!(().to_value().to_token(), Token::None); - assert_eq!(Option::None::.to_value().to_token(), Token::None); - } -} +//! Converting standard types into `Value`s. +//! +//! This module provides `ToValue` implementations for commonly +//! logged types from the standard library. + +use std::fmt; + +use super::{Primitive, ToValue, Value}; + +macro_rules! impl_into_owned { + ($($into_ty:ty => $convert:ident,)*) => { + $( + impl ToValue for $into_ty { + fn to_value(&self) -> Value { + Value::from(*self) + } + } + + impl<'v> From<$into_ty> for Value<'v> { + fn from(value: $into_ty) -> Self { + Value::from_primitive(value as $convert) + } + } + )* + }; +} + +impl<'v> ToValue for &'v str { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From<&'v str> for Value<'v> { + fn from(value: &'v str) -> Self { + Value::from_primitive(value) + } +} + +impl<'v> ToValue for fmt::Arguments<'v> { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From> for Value<'v> { + fn from(value: fmt::Arguments<'v>) -> Self { + Value::from_primitive(value) + } +} + +impl ToValue for () { + fn to_value(&self) -> Value { + Value::from_primitive(Primitive::None) + } +} + +impl ToValue for Option +where + T: ToValue, +{ + fn to_value(&self) -> Value { + match *self { + Some(ref value) => value.to_value(), + None => Value::from_primitive(Primitive::None), + } + } +} + +impl_into_owned! 
[ + usize => u64, + u8 => u64, + u16 => u64, + u32 => u64, + u64 => u64, + + isize => i64, + i8 => i64, + i16 => i64, + i32 => i64, + i64 => i64, + + f32 => f64, + f64 => f64, + + char => char, + bool => bool, +]; + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use std::borrow::Cow; + + impl ToValue for Box + where + T: ToValue + ?Sized, + { + fn to_value(&self) -> Value { + (**self).to_value() + } + } + + impl ToValue for String { + fn to_value(&self) -> Value { + Value::from_primitive(Primitive::Str(&*self)) + } + } + + impl<'v> ToValue for Cow<'v, str> { + fn to_value(&self) -> Value { + Value::from_primitive(Primitive::Str(&*self)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kv::value::test::Token; + + #[test] + fn test_to_value_display() { + assert_eq!(42u64.to_value().to_string(), "42"); + assert_eq!(42i64.to_value().to_string(), "42"); + assert_eq!(42.01f64.to_value().to_string(), "42.01"); + assert_eq!(true.to_value().to_string(), "true"); + assert_eq!('a'.to_value().to_string(), "a"); + assert_eq!( + format_args!("a {}", "value").to_value().to_string(), + "a value" + ); + assert_eq!("a loong string".to_value().to_string(), "a loong string"); + assert_eq!(Some(true).to_value().to_string(), "true"); + assert_eq!(().to_value().to_string(), "None"); + assert_eq!(Option::None::.to_value().to_string(), "None"); + } + + #[test] + fn test_to_value_structured() { + assert_eq!(42u64.to_value().to_token(), Token::U64(42)); + assert_eq!(42i64.to_value().to_token(), Token::I64(42)); + assert_eq!(42.01f64.to_value().to_token(), Token::F64(42.01)); + assert_eq!(true.to_value().to_token(), Token::Bool(true)); + assert_eq!('a'.to_value().to_token(), Token::Char('a')); + assert_eq!( + format_args!("a {}", "value").to_value().to_token(), + Token::Str("a value".into()) + ); + assert_eq!( + "a loong string".to_value().to_token(), + Token::Str("a loong string".into()) + ); + assert_eq!(Some(true).to_value().to_token(), Token::Bool(true)); + assert_eq!(().to_value().to_token(), Token::None); + assert_eq!(Option::None::.to_value().to_token(), Token::None); + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/internal/cast.rs cargo-0.47.0/vendor/log/src/kv/value/internal/cast.rs --- cargo-0.44.1/vendor/log/src/kv/value/internal/cast.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/internal/cast.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,475 @@ +//! Coerce a `Value` into some concrete types. +//! +//! These operations are cheap when the captured value is a simple primitive, +//! but may end up executing arbitrary caller code if the value is complex. +//! They will also attempt to downcast erased types into a primitive where possible. + +use std::any::TypeId; +use std::fmt; + +use super::{Erased, Inner, Primitive, Visitor}; +use crate::kv::value::{Error, Value}; + +impl<'v> Value<'v> { + /// Try get a `usize` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_usize(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_u64() + .map(|v| v as usize) + } + + /// Try get a `u8` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_u8(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_u64() + .map(|v| v as u8) + } + + /// Try get a `u16` from this value. 
+ /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_u16(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_u64() + .map(|v| v as u16) + } + + /// Try get a `u32` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_u32(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_u64() + .map(|v| v as u32) + } + + /// Try get a `u64` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_u64(&self) -> Option { + self.inner.cast().into_primitive().into_u64() + } + + /// Try get a `isize` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_isize(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_i64() + .map(|v| v as isize) + } + + /// Try get a `i8` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_i8(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_i64() + .map(|v| v as i8) + } + + /// Try get a `i16` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_i16(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_i64() + .map(|v| v as i16) + } + + /// Try get a `i32` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_i32(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_i64() + .map(|v| v as i32) + } + + /// Try get a `i64` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_i64(&self) -> Option { + self.inner.cast().into_primitive().into_i64() + } + + /// Try get a `f32` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_f32(&self) -> Option { + self.inner + .cast() + .into_primitive() + .into_f64() + .map(|v| v as f32) + } + + /// Try get a `f64` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_f64(&self) -> Option { + self.inner.cast().into_primitive().into_f64() + } + + /// Try get a `bool` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_bool(&self) -> Option { + self.inner.cast().into_primitive().into_bool() + } + + /// Try get a `char` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. + pub fn to_char(&self) -> Option { + self.inner.cast().into_primitive().into_char() + } + + /// Try get a `str` from this value. + /// + /// This method is cheap for primitive types. It won't allocate an owned + /// `String` if the value is a complex type. 
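The `to_*` accessors above coerce a captured primitive into a concrete type without allocating. A minimal sketch, again assuming the `kv_unstable` feature; the variable names are illustrative:

```rust
// Sketch only: assumes `log` is built with the unstable `kv_unstable` feature.
use log::kv::value::{ToValue, Value};

fn main() {
    let n = 42u64;
    let v: Value = n.to_value();

    // Numeric coercions are cheap reads of the captured primitive;
    // narrowing is done with plain `as` casts internally.
    assert_eq!(Some(42u8), v.to_u8());
    assert_eq!(Some(42u64), v.to_u64());

    // Borrowed strings come back without allocating.
    let s = "hello";
    let sv: Value = s.to_value();
    assert_eq!(Some("hello"), sv.to_borrowed_str());
}
```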
+ pub fn to_borrowed_str(&self) -> Option<&str> { + self.inner.cast().into_primitive().into_borrowed_str() + } +} + +impl<'v> Inner<'v> { + /// Cast the inner value to another type. + fn cast(self) -> Cast<'v> { + struct CastVisitor<'v>(Cast<'v>); + + impl<'v> Visitor<'v> for CastVisitor<'v> { + fn debug(&mut self, _: &dyn fmt::Debug) -> Result<(), Error> { + Ok(()) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::Unsigned(v)); + Ok(()) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::Signed(v)); + Ok(()) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::Float(v)); + Ok(()) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::Bool(v)); + Ok(()) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::Char(v)); + Ok(()) + } + + fn borrowed_str(&mut self, v: &'v str) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::Str(v)); + Ok(()) + } + + #[cfg(not(feature = "std"))] + fn str(&mut self, _: &str) -> Result<(), Error> { + Ok(()) + } + + #[cfg(feature = "std")] + fn str(&mut self, v: &str) -> Result<(), Error> { + self.0 = Cast::String(v.into()); + Ok(()) + } + + fn none(&mut self) -> Result<(), Error> { + self.0 = Cast::Primitive(Primitive::None); + Ok(()) + } + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, v: &dyn super::sval::Value) -> Result<(), Error> { + self.0 = super::sval::cast(v); + Ok(()) + } + } + + // Try downcast an erased value first + // It also lets us avoid the Visitor infrastructure for simple primitives + let primitive = match self { + Inner::Primitive(value) => Some(value), + Inner::Fill(value) => value.downcast_primitive(), + Inner::Debug(value) => value.downcast_primitive(), + Inner::Display(value) => value.downcast_primitive(), + + #[cfg(feature = "sval")] + Inner::Sval(value) => value.downcast_primitive(), + }; + + primitive.map(Cast::Primitive).unwrap_or_else(|| { + // If the erased value isn't a primitive then we visit it + let mut cast = CastVisitor(Cast::Primitive(Primitive::None)); + let _ = self.visit(&mut cast); + cast.0 + }) + } +} + +pub(super) enum Cast<'v> { + Primitive(Primitive<'v>), + #[cfg(feature = "std")] + String(String), +} + +impl<'v> Cast<'v> { + fn into_primitive(self) -> Primitive<'v> { + match self { + Cast::Primitive(value) => value, + #[cfg(feature = "std")] + _ => Primitive::None, + } + } +} + +impl<'v> Primitive<'v> { + fn into_borrowed_str(self) -> Option<&'v str> { + if let Primitive::Str(value) = self { + Some(value) + } else { + None + } + } + + fn into_u64(self) -> Option { + match self { + Primitive::Unsigned(value) => Some(value), + Primitive::Signed(value) => Some(value as u64), + Primitive::Float(value) => Some(value as u64), + _ => None, + } + } + + fn into_i64(self) -> Option { + match self { + Primitive::Signed(value) => Some(value), + Primitive::Unsigned(value) => Some(value as i64), + Primitive::Float(value) => Some(value as i64), + _ => None, + } + } + + fn into_f64(self) -> Option { + match self { + Primitive::Float(value) => Some(value), + Primitive::Unsigned(value) => Some(value as f64), + Primitive::Signed(value) => Some(value as f64), + _ => None, + } + } + + fn into_char(self) -> Option { + if let Primitive::Char(value) = self { + Some(value) + } else { + None + } + } + + fn into_bool(self) -> Option { + if let Primitive::Bool(value) = self { + Some(value) + } else { + None + } 
+ } +} + +impl<'v, T: ?Sized + 'static> Erased<'v, T> { + // NOTE: This function is a perfect candidate for memoization + // The outcome could be stored in a `Cell` + fn downcast_primitive(self) -> Option> { + macro_rules! type_ids { + ($($value:ident : $ty:ty => $cast:expr,)*) => {{ + struct TypeIds; + + impl TypeIds { + fn downcast_primitive<'v, T: ?Sized>(&self, value: Erased<'v, T>) -> Option> { + $( + if TypeId::of::<$ty>() == value.type_id { + let $value = unsafe { value.downcast_unchecked::<$ty>() }; + return Some(Primitive::from($cast)); + } + )* + + None + } + } + + TypeIds + }}; + } + + let type_ids = type_ids![ + value: usize => *value as u64, + value: u8 => *value as u64, + value: u16 => *value as u64, + value: u32 => *value as u64, + value: u64 => *value, + + value: isize => *value as i64, + value: i8 => *value as i64, + value: i16 => *value as i64, + value: i32 => *value as i64, + value: i64 => *value, + + value: f32 => *value as f64, + value: f64 => *value, + + value: char => *value, + value: bool => *value, + + value: &str => *value, + ]; + + type_ids.downcast_primitive(self) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use std::borrow::Cow; + + impl<'v> Value<'v> { + /// Try get a `usize` from this value. + /// + /// This method is cheap for primitive types, but may call arbitrary + /// serialization implementations for complex ones. If the serialization + /// implementation produces a short lived string it will be allocated. + pub fn to_str(&self) -> Option> { + self.inner.cast().into_str() + } + } + + impl<'v> Cast<'v> { + pub(super) fn into_str(self) -> Option> { + match self { + Cast::Primitive(Primitive::Str(value)) => Some(value.into()), + Cast::String(value) => Some(value.into()), + _ => None, + } + } + } + + #[cfg(test)] + mod tests { + use crate::kv::ToValue; + + #[test] + fn primitive_cast() { + assert_eq!( + "a string", + "a string" + .to_owned() + .to_value() + .to_borrowed_str() + .expect("invalid value") + ); + assert_eq!( + "a string", + &*"a string".to_value().to_str().expect("invalid value") + ); + assert_eq!( + "a string", + &*"a string" + .to_owned() + .to_value() + .to_str() + .expect("invalid value") + ); + } + } +} + +#[cfg(test)] +mod tests { + use crate::kv::ToValue; + + #[test] + fn primitive_cast() { + assert_eq!( + "a string", + "a string" + .to_value() + .to_borrowed_str() + .expect("invalid value") + ); + assert_eq!( + "a string", + Some("a string") + .to_value() + .to_borrowed_str() + .expect("invalid value") + ); + + assert_eq!(1u8, 1u64.to_value().to_u8().expect("invalid value")); + assert_eq!(1u16, 1u64.to_value().to_u16().expect("invalid value")); + assert_eq!(1u32, 1u64.to_value().to_u32().expect("invalid value")); + assert_eq!(1u64, 1u64.to_value().to_u64().expect("invalid value")); + assert_eq!(1usize, 1u64.to_value().to_usize().expect("invalid value")); + + assert_eq!(-1i8, -1i64.to_value().to_i8().expect("invalid value")); + assert_eq!(-1i16, -1i64.to_value().to_i16().expect("invalid value")); + assert_eq!(-1i32, -1i64.to_value().to_i32().expect("invalid value")); + assert_eq!(-1i64, -1i64.to_value().to_i64().expect("invalid value")); + assert_eq!(-1isize, -1i64.to_value().to_isize().expect("invalid value")); + + assert!(1f32.to_value().to_f32().is_some(), "invalid value"); + assert!(1f64.to_value().to_f64().is_some(), "invalid value"); + + assert_eq!(1u32, 1i64.to_value().to_u32().expect("invalid value")); + assert_eq!(1i32, 1u64.to_value().to_i32().expect("invalid value")); + 
assert!(1f32.to_value().to_i32().is_some(), "invalid value"); + + assert_eq!('a', 'a'.to_value().to_char().expect("invalid value")); + assert_eq!(true, true.to_value().to_bool().expect("invalid value")); + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/internal/fmt.rs cargo-0.47.0/vendor/log/src/kv/value/internal/fmt.rs --- cargo-0.44.1/vendor/log/src/kv/value/internal/fmt.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/internal/fmt.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,249 @@ +//! Integration between `Value` and `std::fmt`. +//! +//! This module allows any `Value` to implement the `fmt::Debug` and `fmt::Display` traits, +//! and for any `fmt::Debug` or `fmt::Display` to be captured as a `Value`. + +use std::fmt; + +use super::{Erased, Inner, Visitor}; +use crate::kv; +use crate::kv::value::{Error, Slot}; + +impl<'v> kv::Value<'v> { + /// Get a value from a debuggable type. + pub fn from_debug(value: &'v T) -> Self + where + T: fmt::Debug + 'static, + { + kv::Value { + inner: Inner::Debug(unsafe { Erased::new_unchecked::(value) }), + } + } + + /// Get a value from a displayable type. + pub fn from_display(value: &'v T) -> Self + where + T: fmt::Display + 'static, + { + kv::Value { + inner: Inner::Display(unsafe { Erased::new_unchecked::(value) }), + } + } +} + +impl<'s, 'f> Slot<'s, 'f> { + /// Fill the slot with a debuggable value. + /// + /// The given value doesn't need to satisfy any particular lifetime constraints. + /// + /// # Panics + /// + /// Calling more than a single `fill` method on this slot will panic. + pub fn fill_debug(&mut self, value: T) -> Result<(), Error> + where + T: fmt::Debug, + { + self.fill(|visitor| visitor.debug(&value)) + } + + /// Fill the slot with a displayable value. + /// + /// The given value doesn't need to satisfy any particular lifetime constraints. + /// + /// # Panics + /// + /// Calling more than a single `fill` method on this slot will panic. 
+ pub fn fill_display(&mut self, value: T) -> Result<(), Error> + where + T: fmt::Display, + { + self.fill(|visitor| visitor.display(&value)) + } +} + +pub(in kv::value) use self::fmt::{Arguments, Debug, Display}; + +impl<'v> fmt::Debug for kv::Value<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + struct DebugVisitor<'a, 'b: 'a>(&'a mut fmt::Formatter<'b>); + + impl<'a, 'b: 'a, 'v> Visitor<'v> for DebugVisitor<'a, 'b> { + fn debug(&mut self, v: &dyn fmt::Debug) -> Result<(), Error> { + fmt::Debug::fmt(v, self.0)?; + + Ok(()) + } + + fn display(&mut self, v: &dyn fmt::Display) -> Result<(), Error> { + fmt::Display::fmt(v, self.0)?; + + Ok(()) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + fmt::Debug::fmt(&v, self.0)?; + + Ok(()) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + fmt::Debug::fmt(&v, self.0)?; + + Ok(()) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + fmt::Debug::fmt(&v, self.0)?; + + Ok(()) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + fmt::Debug::fmt(&v, self.0)?; + + Ok(()) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + fmt::Debug::fmt(&v, self.0)?; + + Ok(()) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + fmt::Debug::fmt(&v, self.0)?; + + Ok(()) + } + + fn none(&mut self) -> Result<(), Error> { + self.debug(&format_args!("None")) + } + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, v: &dyn super::sval::Value) -> Result<(), Error> { + super::sval::fmt(self.0, v) + } + } + + self.visit(&mut DebugVisitor(f)).map_err(|_| fmt::Error)?; + + Ok(()) + } +} + +impl<'v> fmt::Display for kv::Value<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + struct DisplayVisitor<'a, 'b: 'a>(&'a mut fmt::Formatter<'b>); + + impl<'a, 'b: 'a, 'v> Visitor<'v> for DisplayVisitor<'a, 'b> { + fn debug(&mut self, v: &dyn fmt::Debug) -> Result<(), Error> { + fmt::Debug::fmt(v, self.0)?; + + Ok(()) + } + + fn display(&mut self, v: &dyn fmt::Display) -> Result<(), Error> { + fmt::Display::fmt(v, self.0)?; + + Ok(()) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + fmt::Display::fmt(&v, self.0)?; + + Ok(()) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + fmt::Display::fmt(&v, self.0)?; + + Ok(()) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + fmt::Display::fmt(&v, self.0)?; + + Ok(()) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + fmt::Display::fmt(&v, self.0)?; + + Ok(()) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + fmt::Display::fmt(&v, self.0)?; + + Ok(()) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + fmt::Display::fmt(&v, self.0)?; + + Ok(()) + } + + fn none(&mut self) -> Result<(), Error> { + self.debug(&format_args!("None")) + } + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, v: &dyn super::sval::Value) -> Result<(), Error> { + super::sval::fmt(self.0, v) + } + } + + self.visit(&mut DisplayVisitor(f)).map_err(|_| fmt::Error)?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::kv::value::ToValue; + + #[test] + fn fmt_cast() { + assert_eq!( + 42u32, + kv::Value::from_debug(&42u64) + .to_u32() + .expect("invalid value") + ); + + assert_eq!( + "a string", + kv::Value::from_display(&"a string") + .to_borrowed_str() + .expect("invalid value") + ); + } + + #[test] + fn fmt_debug() { + assert_eq!( + format!("{:?}", "a string"), + format!("{:?}", "a string".to_value()), + ); + + assert_eq!( + format!("{:04?}", 42u64), + format!("{:04?}", 42u64.to_value()), + ); + } + + 
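The `from_debug`/`from_display` constructors together with the `Debug`/`Display` visitors above mean a captured value formats exactly like the original, including format flags, as the `fmt_debug` test exercises. A small sketch under the same `kv_unstable` assumption; the `Endpoint` type is invented for illustration:

```rust
// Sketch only: assumes `log` is built with the unstable `kv_unstable` feature.
use log::kv::value::Value;

#[derive(Debug)]
struct Endpoint {
    host: &'static str,
    port: u16,
}

fn main() {
    let ep = Endpoint { host: "localhost", port: 8080 };

    // Captured through its `Debug` impl; the formatter (and its flags)
    // is forwarded straight to the underlying value.
    let v = Value::from_debug(&ep);
    assert_eq!(format!("{:?}", ep), format!("{:?}", v));

    // `Display` types can be captured the same way.
    let port = 8080u16;
    let d = Value::from_display(&port);
    assert_eq!("8080", d.to_string());
}
```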
#[test] + fn fmt_display() { + assert_eq!( + format!("{}", "a string"), + format!("{}", "a string".to_value()), + ); + + assert_eq!(format!("{:04}", 42u64), format!("{:04}", 42u64.to_value()),); + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/internal/mod.rs cargo-0.47.0/vendor/log/src/kv/value/internal/mod.rs --- cargo-0.44.1/vendor/log/src/kv/value/internal/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/internal/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,181 @@ +//! The internal `Value` serialization API. +//! +//! This implementation isn't intended to be public. It may need to change +//! for optimizations or to support new external serialization frameworks. + +use std::any::TypeId; + +use super::{Error, Fill, Slot}; + +pub(super) mod cast; +pub(super) mod fmt; +#[cfg(feature = "kv_unstable_sval")] +pub(super) mod sval; + +/// A container for a structured value for a specific kind of visitor. +#[derive(Clone, Copy)] +pub(super) enum Inner<'v> { + /// A simple primitive value that can be copied without allocating. + Primitive(Primitive<'v>), + /// A value that can be filled. + Fill(Erased<'v, dyn Fill + 'static>), + /// A debuggable value. + Debug(Erased<'v, dyn fmt::Debug + 'static>), + /// A displayable value. + Display(Erased<'v, dyn fmt::Display + 'static>), + + #[cfg(feature = "kv_unstable_sval")] + /// A structured value from `sval`. + Sval(Erased<'v, dyn sval::Value + 'static>), +} + +impl<'v> Inner<'v> { + pub(super) fn visit(self, visitor: &mut dyn Visitor<'v>) -> Result<(), Error> { + match self { + Inner::Primitive(value) => value.visit(visitor), + Inner::Fill(value) => value.get().fill(&mut Slot::new(visitor)), + Inner::Debug(value) => visitor.debug(value.get()), + Inner::Display(value) => visitor.display(value.get()), + + #[cfg(feature = "kv_unstable_sval")] + Inner::Sval(value) => visitor.sval(value.get()), + } + } +} + +/// The internal serialization contract. +pub(super) trait Visitor<'v> { + fn debug(&mut self, v: &dyn fmt::Debug) -> Result<(), Error>; + fn display(&mut self, v: &dyn fmt::Display) -> Result<(), Error> { + self.debug(&format_args!("{}", v)) + } + + fn u64(&mut self, v: u64) -> Result<(), Error>; + fn i64(&mut self, v: i64) -> Result<(), Error>; + fn f64(&mut self, v: f64) -> Result<(), Error>; + fn bool(&mut self, v: bool) -> Result<(), Error>; + fn char(&mut self, v: char) -> Result<(), Error>; + + fn str(&mut self, v: &str) -> Result<(), Error>; + fn borrowed_str(&mut self, v: &'v str) -> Result<(), Error> { + self.str(v) + } + + fn none(&mut self) -> Result<(), Error>; + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, v: &dyn sval::Value) -> Result<(), Error>; +} + +/// A captured primitive value. +/// +/// These values are common and cheap to copy around. 
+#[derive(Clone, Copy)] +pub(super) enum Primitive<'v> { + Signed(i64), + Unsigned(u64), + Float(f64), + Bool(bool), + Char(char), + Str(&'v str), + Fmt(fmt::Arguments<'v>), + None, +} + +impl<'v> Primitive<'v> { + fn visit(self, visitor: &mut dyn Visitor<'v>) -> Result<(), Error> { + match self { + Primitive::Signed(value) => visitor.i64(value), + Primitive::Unsigned(value) => visitor.u64(value), + Primitive::Float(value) => visitor.f64(value), + Primitive::Bool(value) => visitor.bool(value), + Primitive::Char(value) => visitor.char(value), + Primitive::Str(value) => visitor.borrowed_str(value), + Primitive::Fmt(value) => visitor.debug(&value), + Primitive::None => visitor.none(), + } + } +} + +impl<'v> From for Primitive<'v> { + fn from(v: u64) -> Self { + Primitive::Unsigned(v) + } +} + +impl<'v> From for Primitive<'v> { + fn from(v: i64) -> Self { + Primitive::Signed(v) + } +} + +impl<'v> From for Primitive<'v> { + fn from(v: f64) -> Self { + Primitive::Float(v) + } +} + +impl<'v> From for Primitive<'v> { + fn from(v: bool) -> Self { + Primitive::Bool(v) + } +} + +impl<'v> From for Primitive<'v> { + fn from(v: char) -> Self { + Primitive::Char(v) + } +} + +impl<'v> From<&'v str> for Primitive<'v> { + fn from(v: &'v str) -> Self { + Primitive::Str(v) + } +} + +impl<'v> From> for Primitive<'v> { + fn from(v: fmt::Arguments<'v>) -> Self { + Primitive::Fmt(v) + } +} + +/// A downcastable dynamic type. +pub(super) struct Erased<'v, T: ?Sized> { + type_id: TypeId, + inner: &'v T, +} + +impl<'v, T: ?Sized> Clone for Erased<'v, T> { + fn clone(&self) -> Self { + Erased { + type_id: self.type_id, + inner: self.inner, + } + } +} + +impl<'v, T: ?Sized> Copy for Erased<'v, T> {} + +impl<'v, T: ?Sized> Erased<'v, T> { + // SAFETY: `U: Unsize` and the underlying value `T` must not change + // We could add a safe variant of this method with the `Unsize` trait + pub(super) unsafe fn new_unchecked(inner: &'v T) -> Self + where + U: 'static, + T: 'static, + { + Erased { + type_id: TypeId::of::(), + inner, + } + } + + pub(super) fn get(self) -> &'v T { + self.inner + } + + // SAFETY: The underlying type of `T` is `U` + pub(super) unsafe fn downcast_unchecked(self) -> &'v U { + &*(self.inner as *const T as *const U) + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/internal/sval.rs cargo-0.47.0/vendor/log/src/kv/value/internal/sval.rs --- cargo-0.44.1/vendor/log/src/kv/value/internal/sval.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/internal/sval.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,210 @@ +//! Integration between `Value` and `sval`. +//! +//! This module allows any `Value` to implement the `sval::Value` trait, +//! and for any `sval::Value` to be captured as a `Value`. + +extern crate sval; + +use std::fmt; + +use super::cast::Cast; +use super::{Erased, Inner, Primitive, Visitor}; +use crate::kv; +use crate::kv::value::{Error, Slot}; + +impl<'v> kv::Value<'v> { + /// Get a value from a structured type. + pub fn from_sval(value: &'v T) -> Self + where + T: sval::Value + 'static, + { + kv::Value { + inner: Inner::Sval(unsafe { Erased::new_unchecked::(value) }), + } + } +} + +impl<'s, 'f> Slot<'s, 'f> { + /// Fill the slot with a structured value. + /// + /// The given value doesn't need to satisfy any particular lifetime constraints. + /// + /// # Panics + /// + /// Calling more than a single `fill` method on this slot will panic. 
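The `Erased` helper above pairs an erased reference with the `TypeId` of its concrete type, so `downcast_unchecked` is only reached after the id check in `downcast_primitive`. The standalone sketch below illustrates the same technique outside the crate; `ErasedDebug` and `as_concrete` are hypothetical names, not part of the `log` API:

```rust
// Standalone sketch of the erase-then-downcast technique used by `Erased`.
use std::any::TypeId;
use std::fmt;

struct ErasedDebug<'v> {
    type_id: TypeId,
    inner: &'v dyn fmt::Debug,
}

impl<'v> ErasedDebug<'v> {
    fn new<T: fmt::Debug + 'static>(value: &'v T) -> Self {
        ErasedDebug {
            // Remember the concrete type so we can downcast later.
            type_id: TypeId::of::<T>(),
            inner: value,
        }
    }

    fn as_concrete<T: 'static>(&self) -> Option<&'v T> {
        if self.type_id == TypeId::of::<T>() {
            // SAFETY: the stored TypeId proves the erased reference
            // really points at a `T`, so the thin-pointer cast is sound.
            Some(unsafe { &*(self.inner as *const dyn fmt::Debug as *const T) })
        } else {
            None
        }
    }
}

fn main() {
    let n = 42u64;
    let erased = ErasedDebug::new(&n);

    assert_eq!(Some(&42u64), erased.as_concrete::<u64>());
    assert_eq!(None, erased.as_concrete::<bool>());
}
```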
+ pub fn fill_sval(&mut self, value: T) -> Result<(), Error> + where + T: sval::Value, + { + self.fill(|visitor| visitor.sval(&value)) + } +} + +impl<'v> sval::Value for kv::Value<'v> { + fn stream(&self, s: &mut sval::value::Stream) -> sval::value::Result { + struct SvalVisitor<'a, 'b: 'a>(&'a mut sval::value::Stream<'b>); + + impl<'a, 'b: 'a, 'v> Visitor<'v> for SvalVisitor<'a, 'b> { + fn debug(&mut self, v: &dyn fmt::Debug) -> Result<(), Error> { + self.0 + .fmt(format_args!("{:?}", v)) + .map_err(Error::from_sval) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + self.0.u64(v).map_err(Error::from_sval) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + self.0.i64(v).map_err(Error::from_sval) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + self.0.f64(v).map_err(Error::from_sval) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + self.0.bool(v).map_err(Error::from_sval) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + self.0.char(v).map_err(Error::from_sval) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + self.0.str(v).map_err(Error::from_sval) + } + + fn none(&mut self) -> Result<(), Error> { + self.0.none().map_err(Error::from_sval) + } + + fn sval(&mut self, v: &dyn sval::Value) -> Result<(), Error> { + self.0.any(v).map_err(Error::from_sval) + } + } + + self.visit(&mut SvalVisitor(s)).map_err(Error::into_sval)?; + + Ok(()) + } +} + +pub(in kv::value) use self::sval::Value; + +pub(super) fn fmt(f: &mut fmt::Formatter, v: &dyn sval::Value) -> Result<(), Error> { + sval::fmt::debug(f, v)?; + Ok(()) +} + +pub(super) fn cast<'v>(v: &dyn sval::Value) -> Cast<'v> { + struct CastStream<'v>(Cast<'v>); + + impl<'v> sval::Stream for CastStream<'v> { + fn u64(&mut self, v: u64) -> sval::stream::Result { + self.0 = Cast::Primitive(Primitive::Unsigned(v)); + Ok(()) + } + + fn i64(&mut self, v: i64) -> sval::stream::Result { + self.0 = Cast::Primitive(Primitive::Signed(v)); + Ok(()) + } + + fn f64(&mut self, v: f64) -> sval::stream::Result { + self.0 = Cast::Primitive(Primitive::Float(v)); + Ok(()) + } + + fn char(&mut self, v: char) -> sval::stream::Result { + self.0 = Cast::Primitive(Primitive::Char(v)); + Ok(()) + } + + fn bool(&mut self, v: bool) -> sval::stream::Result { + self.0 = Cast::Primitive(Primitive::Bool(v)); + Ok(()) + } + + #[cfg(feature = "std")] + fn str(&mut self, s: &str) -> sval::stream::Result { + self.0 = Cast::String(s.into()); + Ok(()) + } + } + + let mut cast = CastStream(Cast::Primitive(Primitive::None)); + let _ = sval::stream(&mut cast, v); + + cast.0 +} + +impl Error { + fn from_sval(_: sval::value::Error) -> Self { + Error::msg("`sval` serialization failed") + } + + fn into_sval(self) -> sval::value::Error { + sval::value::Error::msg("`sval` serialization failed") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kv::value::test::Token; + + #[test] + fn test_from_sval() { + assert_eq!(kv::Value::from_sval(&42u64).to_token(), Token::Sval); + } + + #[test] + fn test_sval_structured() { + let value = kv::Value::from(42u64); + let expected = vec![sval::test::Token::Unsigned(42)]; + + assert_eq!(sval::test::tokens(value), expected); + } + + #[test] + fn sval_cast() { + assert_eq!( + 42u32, + kv::Value::from_sval(&42u64) + .to_u32() + .expect("invalid value") + ); + + assert_eq!( + "a string", + kv::Value::from_sval(&"a string") + .to_borrowed_str() + .expect("invalid value") + ); + + #[cfg(feature = "std")] + assert_eq!( + "a string", + kv::Value::from_sval(&"a string") + .to_str() + .expect("invalid 
value") + ); + } + + #[test] + fn sval_debug() { + struct TestSval; + + impl sval::Value for TestSval { + fn stream(&self, stream: &mut sval::value::Stream) -> sval::value::Result { + stream.u64(42) + } + } + + assert_eq!( + format!("{:04?}", 42u64), + format!("{:04?}", kv::Value::from_sval(&TestSval)), + ); + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/internal.rs cargo-0.47.0/vendor/log/src/kv/value/internal.rs --- cargo-0.44.1/vendor/log/src/kv/value/internal.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/internal.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,264 +0,0 @@ -use std::fmt; - -use super::{Fill, Slot, Error}; -use kv; - -// `Visitor` is an internal API for visiting the structure of a value. -// It's not intended to be public (at this stage). - -/// A container for a structured value for a specific kind of visitor. -#[derive(Clone, Copy)] -pub(super) enum Inner<'v> { - /// A simple primitive value that can be copied without allocating. - Primitive(Primitive<'v>), - /// A value that can be filled. - Fill(&'v Fill), - /// A debuggable value. - Debug(&'v fmt::Debug), - /// A displayable value. - Display(&'v fmt::Display), - - #[cfg(feature = "kv_unstable_sval")] - /// A structured value from `sval`. - Sval(&'v sval_support::Value), -} - -impl<'v> Inner<'v> { - pub(super) fn visit(&self, visitor: &mut Visitor) -> Result<(), Error> { - match *self { - Inner::Primitive(value) => match value { - Primitive::Signed(value) => visitor.i64(value), - Primitive::Unsigned(value) => visitor.u64(value), - Primitive::Float(value) => visitor.f64(value), - Primitive::Bool(value) => visitor.bool(value), - Primitive::Char(value) => visitor.char(value), - Primitive::Str(value) => visitor.str(value), - Primitive::None => visitor.none(), - }, - Inner::Fill(value) => value.fill(&mut Slot::new(visitor)), - Inner::Debug(value) => visitor.debug(value), - Inner::Display(value) => visitor.display(value), - - #[cfg(feature = "kv_unstable_sval")] - Inner::Sval(value) => visitor.sval(value), - } - } -} - -/// The internal serialization contract. -pub(super) trait Visitor { - fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error>; - fn display(&mut self, v: &fmt::Display) -> Result<(), Error> { - self.debug(&format_args!("{}", v)) - } - - fn u64(&mut self, v: u64) -> Result<(), Error>; - fn i64(&mut self, v: i64) -> Result<(), Error>; - fn f64(&mut self, v: f64) -> Result<(), Error>; - fn bool(&mut self, v: bool) -> Result<(), Error>; - fn char(&mut self, v: char) -> Result<(), Error>; - fn str(&mut self, v: &str) -> Result<(), Error>; - fn none(&mut self) -> Result<(), Error>; - - #[cfg(feature = "kv_unstable_sval")] - fn sval(&mut self, v: &sval_support::Value) -> Result<(), Error>; -} - -#[derive(Clone, Copy)] -pub(super) enum Primitive<'v> { - Signed(i64), - Unsigned(u64), - Float(f64), - Bool(bool), - Char(char), - Str(&'v str), - None, -} - -mod fmt_support { - use super::*; - - impl<'v> kv::Value<'v> { - /// Get a value from a debuggable type. - pub fn from_debug(value: &'v T) -> Self - where - T: fmt::Debug, - { - kv::Value { - inner: Inner::Debug(value), - } - } - - /// Get a value from a displayable type. 
- pub fn from_display(value: &'v T) -> Self - where - T: fmt::Display, - { - kv::Value { - inner: Inner::Display(value), - } - } - } - - impl<'v> fmt::Debug for kv::Value<'v> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.visit(&mut FmtVisitor(f))?; - - Ok(()) - } - } - - impl<'v> fmt::Display for kv::Value<'v> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.visit(&mut FmtVisitor(f))?; - - Ok(()) - } - } - - struct FmtVisitor<'a, 'b: 'a>(&'a mut fmt::Formatter<'b>); - - impl<'a, 'b: 'a> Visitor for FmtVisitor<'a, 'b> { - fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error> { - v.fmt(self.0)?; - - Ok(()) - } - - fn u64(&mut self, v: u64) -> Result<(), Error> { - self.debug(&format_args!("{:?}", v)) - } - - fn i64(&mut self, v: i64) -> Result<(), Error> { - self.debug(&format_args!("{:?}", v)) - } - - fn f64(&mut self, v: f64) -> Result<(), Error> { - self.debug(&format_args!("{:?}", v)) - } - - fn bool(&mut self, v: bool) -> Result<(), Error> { - self.debug(&format_args!("{:?}", v)) - } - - fn char(&mut self, v: char) -> Result<(), Error> { - self.debug(&format_args!("{:?}", v)) - } - - fn str(&mut self, v: &str) -> Result<(), Error> { - self.debug(&format_args!("{:?}", v)) - } - - fn none(&mut self) -> Result<(), Error> { - self.debug(&format_args!("None")) - } - - #[cfg(feature = "kv_unstable_sval")] - fn sval(&mut self, v: &sval_support::Value) -> Result<(), Error> { - sval_support::fmt(self.0, v) - } - } -} - -#[cfg(feature = "kv_unstable_sval")] -pub(super) mod sval_support { - use super::*; - - extern crate sval; - - impl<'v> kv::Value<'v> { - /// Get a value from a structured type. - pub fn from_sval(value: &'v T) -> Self - where - T: sval::Value, - { - kv::Value { - inner: Inner::Sval(value), - } - } - } - - impl<'v> sval::Value for kv::Value<'v> { - fn stream(&self, s: &mut sval::value::Stream) -> sval::value::Result { - self.visit(&mut SvalVisitor(s)).map_err(Error::into_sval)?; - - Ok(()) - } - } - - pub(in kv::value) use self::sval::Value; - - pub(super) fn fmt(f: &mut fmt::Formatter, v: &sval::Value) -> Result<(), Error> { - sval::fmt::debug(f, v)?; - Ok(()) - } - - impl Error { - fn from_sval(_: sval::value::Error) -> Self { - Error::msg("`sval` serialization failed") - } - - fn into_sval(self) -> sval::value::Error { - sval::value::Error::msg("`sval` serialization failed") - } - } - - struct SvalVisitor<'a, 'b: 'a>(&'a mut sval::value::Stream<'b>); - - impl<'a, 'b: 'a> Visitor for SvalVisitor<'a, 'b> { - fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error> { - self.0.fmt(format_args!("{:?}", v)).map_err(Error::from_sval) - } - - fn u64(&mut self, v: u64) -> Result<(), Error> { - self.0.u64(v).map_err(Error::from_sval) - } - - fn i64(&mut self, v: i64) -> Result<(), Error> { - self.0.i64(v).map_err(Error::from_sval) - } - - fn f64(&mut self, v: f64) -> Result<(), Error> { - self.0.f64(v).map_err(Error::from_sval) - } - - fn bool(&mut self, v: bool) -> Result<(), Error> { - self.0.bool(v).map_err(Error::from_sval) - } - - fn char(&mut self, v: char) -> Result<(), Error> { - self.0.char(v).map_err(Error::from_sval) - } - - fn str(&mut self, v: &str) -> Result<(), Error> { - self.0.str(v).map_err(Error::from_sval) - } - - fn none(&mut self) -> Result<(), Error> { - self.0.none().map_err(Error::from_sval) - } - - fn sval(&mut self, v: &sval::Value) -> Result<(), Error> { - self.0.any(v).map_err(Error::from_sval) - } - } - - #[cfg(test)] - mod tests { - use super::*; - use kv::value::test::Token; - - #[test] - fn test_from_sval() { - 
assert_eq!(kv::Value::from_sval(&42u64).to_token(), Token::Sval); - } - - #[test] - fn test_sval_structured() { - let value = kv::Value::from(42u64); - let expected = vec![sval::test::Token::Unsigned(42)]; - - assert_eq!(sval::test::tokens(value), expected); - } - } -} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/mod.rs cargo-0.47.0/vendor/log/src/kv/value/mod.rs --- cargo-0.44.1/vendor/log/src/kv/value/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,155 +1,56 @@ -//! Structured values. - -use std::fmt; - -mod internal; -mod impls; - -#[cfg(test)] -pub(in kv) mod test; - -pub use kv::Error; - -use self::internal::{Inner, Visitor, Primitive}; - -/// A type that can be converted into a [`Value`](struct.Value.html). -pub trait ToValue { - /// Perform the conversion. - fn to_value(&self) -> Value; -} - -impl<'a, T> ToValue for &'a T -where - T: ToValue + ?Sized, -{ - fn to_value(&self) -> Value { - (**self).to_value() - } -} - -impl<'v> ToValue for Value<'v> { - fn to_value(&self) -> Value { - Value { - inner: self.inner, - } - } -} - -/// A type that requires extra work to convert into a [`Value`](struct.Value.html). -/// -/// This trait is a more advanced initialization API than [`ToValue`](trait.ToValue.html). -/// It's intended for erased values coming from other logging frameworks that may need -/// to perform extra work to determine the concrete type to use. -pub trait Fill { - /// Fill a value. - fn fill(&self, slot: &mut Slot) -> Result<(), Error>; -} - -impl<'a, T> Fill for &'a T -where - T: Fill + ?Sized, -{ - fn fill(&self, slot: &mut Slot) -> Result<(), Error> { - (**self).fill(slot) - } -} - -/// A value slot to fill using the [`Fill`](trait.Fill.html) trait. -pub struct Slot<'a> { - filled: bool, - visitor: &'a mut Visitor, -} - -impl<'a> fmt::Debug for Slot<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Slot").finish() - } -} - -impl<'a> Slot<'a> { - fn new(visitor: &'a mut Visitor) -> Self { - Slot { - visitor, - filled: false, - } - } - - /// Fill the slot with a value. - /// - /// The given value doesn't need to satisfy any particular lifetime constraints. - /// - /// # Panics - /// - /// Calling `fill` more than once will panic. - pub fn fill(&mut self, value: Value) -> Result<(), Error> { - assert!(!self.filled, "the slot has already been filled"); - self.filled = true; - - value.visit(self.visitor) - } -} - -/// A value in a structured key-value pair. -pub struct Value<'v> { - inner: Inner<'v>, -} - -impl<'v> Value<'v> { - /// Get a value from an internal `Visit`. - fn from_primitive(value: Primitive<'v>) -> Self { - Value { - inner: Inner::Primitive(value), - } - } - - /// Get a value from a fillable slot. 
- pub fn from_fill(value: &'v T) -> Self - where - T: Fill, - { - Value { - inner: Inner::Fill(value), - } - } - - fn visit(&self, visitor: &mut Visitor) -> Result<(), Error> { - self.inner.visit(visitor) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn fill_value() { - struct TestFill; - - impl Fill for TestFill { - fn fill(&self, slot: &mut Slot) -> Result<(), Error> { - let dbg: &fmt::Debug = &1; - - slot.fill(Value::from_debug(&dbg)) - } - } - - assert_eq!("1", Value::from_fill(&TestFill).to_string()); - } - - #[test] - #[should_panic] - fn fill_multiple_times_panics() { - struct BadFill; - - impl Fill for BadFill { - fn fill(&self, slot: &mut Slot) -> Result<(), Error> { - slot.fill(42.into())?; - slot.fill(6789.into())?; - - Ok(()) - } - } - - let _ = Value::from_fill(&BadFill).to_string(); - } -} +//! Structured values. + +mod fill; +mod impls; +mod internal; + +#[cfg(test)] +pub(in kv) mod test; + +pub use self::fill::{Fill, Slot}; +pub use kv::Error; + +use self::internal::{Inner, Primitive, Visitor}; + +/// A type that can be converted into a [`Value`](struct.Value.html). +pub trait ToValue { + /// Perform the conversion. + fn to_value(&self) -> Value; +} + +impl<'a, T> ToValue for &'a T +where + T: ToValue + ?Sized, +{ + fn to_value(&self) -> Value { + (**self).to_value() + } +} + +impl<'v> ToValue for Value<'v> { + fn to_value(&self) -> Value { + Value { inner: self.inner } + } +} + +/// A value in a structured key-value pair. +pub struct Value<'v> { + inner: Inner<'v>, +} + +impl<'v> Value<'v> { + /// Get a value from an internal primitive. + fn from_primitive(value: T) -> Self + where + T: Into>, + { + Value { + inner: Inner::Primitive(value.into()), + } + } + + /// Visit the value using an internal visitor. + fn visit<'a>(&'a self, visitor: &mut dyn Visitor<'a>) -> Result<(), Error> { + self.inner.visit(visitor) + } +} diff -Nru cargo-0.44.1/vendor/log/src/kv/value/test.rs cargo-0.47.0/vendor/log/src/kv/value/test.rs --- cargo-0.44.1/vendor/log/src/kv/value/test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/kv/value/test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,81 +1,81 @@ -// Test support for inspecting Values - -use std::fmt; -use std::str; - -use super::{Value, Error}; -use super::internal; - -#[derive(Debug, PartialEq)] -pub(in kv) enum Token { - U64(u64), - I64(i64), - F64(f64), - Char(char), - Bool(bool), - Str(String), - None, - - #[cfg(feature = "kv_unstable_sval")] - Sval, -} - -#[cfg(test)] -impl<'v> Value<'v> { - pub(in kv) fn to_token(&self) -> Token { - struct TestVisitor(Option); - - impl internal::Visitor for TestVisitor { - fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error> { - self.0 = Some(Token::Str(format!("{:?}", v))); - Ok(()) - } - - fn u64(&mut self, v: u64) -> Result<(), Error> { - self.0 = Some(Token::U64(v)); - Ok(()) - } - - fn i64(&mut self, v: i64) -> Result<(), Error> { - self.0 = Some(Token::I64(v)); - Ok(()) - } - - fn f64(&mut self, v: f64) -> Result<(), Error> { - self.0 = Some(Token::F64(v)); - Ok(()) - } - - fn bool(&mut self, v: bool) -> Result<(), Error> { - self.0 = Some(Token::Bool(v)); - Ok(()) - } - - fn char(&mut self, v: char) -> Result<(), Error> { - self.0 = Some(Token::Char(v)); - Ok(()) - } - - fn str(&mut self, v: &str) -> Result<(), Error> { - self.0 = Some(Token::Str(v.into())); - Ok(()) - } - - fn none(&mut self) -> Result<(), Error> { - self.0 = Some(Token::None); - Ok(()) - } - - #[cfg(feature = "kv_unstable_sval")] - fn sval(&mut self, _: 
&internal::sval_support::Value) -> Result<(), Error> { - self.0 = Some(Token::Sval); - Ok(()) - } - } - - let mut visitor = TestVisitor(None); - self.visit(&mut visitor).unwrap(); - - visitor.0.unwrap() - } -} +// Test support for inspecting Values + +use std::fmt; +use std::str; + +use super::internal; +use super::{Error, Value}; + +#[derive(Debug, PartialEq)] +pub(in kv) enum Token { + U64(u64), + I64(i64), + F64(f64), + Char(char), + Bool(bool), + Str(String), + None, + + #[cfg(feature = "kv_unstable_sval")] + Sval, +} + +#[cfg(test)] +impl<'v> Value<'v> { + pub(in kv) fn to_token(&self) -> Token { + struct TestVisitor(Option); + + impl<'v> internal::Visitor<'v> for TestVisitor { + fn debug(&mut self, v: &dyn fmt::Debug) -> Result<(), Error> { + self.0 = Some(Token::Str(format!("{:?}", v))); + Ok(()) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + self.0 = Some(Token::U64(v)); + Ok(()) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + self.0 = Some(Token::I64(v)); + Ok(()) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + self.0 = Some(Token::F64(v)); + Ok(()) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + self.0 = Some(Token::Bool(v)); + Ok(()) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + self.0 = Some(Token::Char(v)); + Ok(()) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + self.0 = Some(Token::Str(v.into())); + Ok(()) + } + + fn none(&mut self) -> Result<(), Error> { + self.0 = Some(Token::None); + Ok(()) + } + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, _: &dyn internal::sval::Value) -> Result<(), Error> { + self.0 = Some(Token::Sval); + Ok(()) + } + } + + let mut visitor = TestVisitor(None); + self.visit(&mut visitor).unwrap(); + + visitor.0.unwrap() + } +} diff -Nru cargo-0.44.1/vendor/log/src/lib.rs cargo-0.47.0/vendor/log/src/lib.rs --- cargo-0.44.1/vendor/log/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,1668 +1,1679 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A lightweight logging facade. -//! -//! The `log` crate provides a single logging API that abstracts over the -//! actual logging implementation. Libraries can use the logging API provided -//! by this crate, and the consumer of those libraries can choose the logging -//! implementation that is most suitable for its use case. -//! -//! If no logging implementation is selected, the facade falls back to a "noop" -//! implementation that ignores all log messages. The overhead in this case -//! is very small - just an integer load, comparison and jump. -//! -//! A log request consists of a _target_, a _level_, and a _body_. A target is a -//! string which defaults to the module path of the location of the log request, -//! though that default may be overridden. Logger implementations typically use -//! the target to filter requests based on some user configuration. -//! -//! # Use -//! -//! The basic use of the log crate is through the five logging macros: [`error!`], -//! [`warn!`], [`info!`], [`debug!`] and [`trace!`] -//! where `error!` represents the highest-priority log messages -//! and `trace!` the lowest. 
The log messages are filtered by configuring -//! the log level to exclude messages with a lower priority. -//! Each of these macros accept format strings similarly to [`println!`]. -//! -//! -//! [`error!`]: ./macro.error.html -//! [`warn!`]: ./macro.warn.html -//! [`info!`]: ./macro.info.html -//! [`debug!`]: ./macro.debug.html -//! [`trace!`]: ./macro.trace.html -//! [`println!`]: https://doc.rust-lang.org/stable/std/macro.println.html -//! -//! ## In libraries -//! -//! Libraries should link only to the `log` crate, and use the provided -//! macros to log whatever information will be useful to downstream consumers. -//! -//! ### Examples -//! -//! ```edition2018 -//! # #[derive(Debug)] pub struct Yak(String); -//! # impl Yak { fn shave(&mut self, _: u32) {} } -//! # fn find_a_razor() -> Result { Ok(1) } -//! use log::{info, warn}; -//! -//! pub fn shave_the_yak(yak: &mut Yak) { -//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak); -//! -//! loop { -//! match find_a_razor() { -//! Ok(razor) => { -//! info!("Razor located: {}", razor); -//! yak.shave(razor); -//! break; -//! } -//! Err(err) => { -//! warn!("Unable to locate a razor: {}, retrying", err); -//! } -//! } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! ## In executables -//! -//! Executables should choose a logging implementation and initialize it early in the -//! runtime of the program. Logging implementations will typically include a -//! function to do this. Any log messages generated before -//! the implementation is initialized will be ignored. -//! -//! The executable itself may use the `log` crate to log as well. -//! -//! ### Warning -//! -//! The logging system may only be initialized once. -//! -//! # Available logging implementations -//! -//! In order to produce log output executables have to use -//! a logger implementation compatible with the facade. -//! There are many available implementations to choose from, -//! here are some of the most popular ones: -//! -//! * Simple minimal loggers: -//! * [env_logger] -//! * [simple_logger] -//! * [simplelog] -//! * [pretty_env_logger] -//! * [stderrlog] -//! * [flexi_logger] -//! * Complex configurable frameworks: -//! * [log4rs] -//! * [fern] -//! * Adaptors for other facilities: -//! * [syslog] -//! * [slog-stdlog] -//! -//! # Implementing a Logger -//! -//! Loggers implement the [`Log`] trait. Here's a very basic example that simply -//! logs all messages at the [`Error`][level_link], [`Warn`][level_link] or -//! [`Info`][level_link] levels to stdout: -//! -//! ```edition2018 -//! use log::{Record, Level, Metadata}; -//! -//! struct SimpleLogger; -//! -//! impl log::Log for SimpleLogger { -//! fn enabled(&self, metadata: &Metadata) -> bool { -//! metadata.level() <= Level::Info -//! } -//! -//! fn log(&self, record: &Record) { -//! if self.enabled(record.metadata()) { -//! println!("{} - {}", record.level(), record.args()); -//! } -//! } -//! -//! fn flush(&self) {} -//! } -//! -//! # fn main() {} -//! ``` -//! -//! Loggers are installed by calling the [`set_logger`] function. The maximum -//! log level also needs to be adjusted via the [`set_max_level`] function. The -//! logging facade uses this as an optimization to improve performance of log -//! messages at levels that are disabled. It's important to set it, as it -//! defaults to [`Off`][filter_link], so no log messages will ever be captured! -//! In the case of our example logger, we'll want to set the maximum log level -//! 
to [`Info`][filter_link], since we ignore any [`Debug`][level_link] or -//! [`Trace`][level_link] level log messages. A logging implementation should -//! provide a function that wraps a call to [`set_logger`] and -//! [`set_max_level`], handling initialization of the logger: -//! -//! ```edition2018 -//! # use log::{Level, Metadata}; -//! # struct SimpleLogger; -//! # impl log::Log for SimpleLogger { -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn log(&self, _: &log::Record) {} -//! # fn flush(&self) {} -//! # } -//! # fn main() {} -//! use log::{SetLoggerError, LevelFilter}; -//! -//! static LOGGER: SimpleLogger = SimpleLogger; -//! -//! pub fn init() -> Result<(), SetLoggerError> { -//! log::set_logger(&LOGGER) -//! .map(|()| log::set_max_level(LevelFilter::Info)) -//! } -//! ``` -//! -//! Implementations that adjust their configurations at runtime should take care -//! to adjust the maximum log level as well. -//! -//! # Use with `std` -//! -//! `set_logger` requires you to provide a `&'static Log`, which can be hard to -//! obtain if your logger depends on some runtime configuration. The -//! `set_boxed_logger` function is available with the `std` Cargo feature. It is -//! identical to `set_logger` except that it takes a `Box` rather than a -//! `&'static Log`: -//! -//! ```edition2018 -//! # use log::{Level, LevelFilter, Log, SetLoggerError, Metadata}; -//! # struct SimpleLogger; -//! # impl log::Log for SimpleLogger { -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn log(&self, _: &log::Record) {} -//! # fn flush(&self) {} -//! # } -//! # fn main() {} -//! # #[cfg(feature = "std")] -//! pub fn init() -> Result<(), SetLoggerError> { -//! log::set_boxed_logger(Box::new(SimpleLogger)) -//! .map(|()| log::set_max_level(LevelFilter::Info)) -//! } -//! ``` -//! -//! # Compile time filters -//! -//! Log levels can be statically disabled at compile time via Cargo features. Log invocations at -//! disabled levels will be skipped and will not even be present in the resulting binary unless the -//! log level is specified dynamically. This level is configured separately for release and debug -//! builds. The features are: -//! -//! * `max_level_off` -//! * `max_level_error` -//! * `max_level_warn` -//! * `max_level_info` -//! * `max_level_debug` -//! * `max_level_trace` -//! * `release_max_level_off` -//! * `release_max_level_error` -//! * `release_max_level_warn` -//! * `release_max_level_info` -//! * `release_max_level_debug` -//! * `release_max_level_trace` -//! -//! These features control the value of the `STATIC_MAX_LEVEL` constant. The logging macros check -//! this value before logging a message. By default, no levels are disabled. -//! -//! Libraries should avoid using the max level features because they're global and can't be changed -//! once they're set. -//! -//! For example, a crate can disable trace level logs in debug builds and trace, debug, and info -//! level logs in release builds with the following configuration: -//! -//! ```toml -//! [dependencies] -//! log = { version = "0.4", features = ["max_level_debug", "release_max_level_warn"] } -//! ``` -//! # Crate Feature Flags -//! -//! The following crate feature flags are avaliable in addition to the filters. They are -//! configured in your `Cargo.toml`. -//! -//! * `std` allows use of `std` crate instead of the default `core`. Enables using `std::error` and -//! `set_boxed_logger` functionality. -//! 
* `serde` enables support for serialization and deserialization of `Level` and `LevelFilter`. -//! -//! ```toml -//! [dependencies] -//! log = { version = "0.4", features = ["std", "serde"] } -//! ``` -//! -//! # Version compatibility -//! -//! The 0.3 and 0.4 versions of the `log` crate are almost entirely compatible. Log messages -//! made using `log` 0.3 will forward transparently to a logger implementation using `log` 0.4. Log -//! messages made using `log` 0.4 will forward to a logger implementation using `log` 0.3, but the -//! module path and file name information associated with the message will unfortunately be lost. -//! -//! [`Log`]: trait.Log.html -//! [level_link]: enum.Level.html -//! [filter_link]: enum.LevelFilter.html -//! [`set_logger`]: fn.set_logger.html -//! [`set_max_level`]: fn.set_max_level.html -//! [`try_set_logger_raw`]: fn.try_set_logger_raw.html -//! [`shutdown_logger_raw`]: fn.shutdown_logger_raw.html -//! [env_logger]: https://docs.rs/env_logger/*/env_logger/ -//! [simple_logger]: https://github.com/borntyping/rust-simple_logger -//! [simplelog]: https://github.com/drakulix/simplelog.rs -//! [pretty_env_logger]: https://docs.rs/pretty_env_logger/*/pretty_env_logger/ -//! [stderrlog]: https://docs.rs/stderrlog/*/stderrlog/ -//! [flexi_logger]: https://docs.rs/flexi_logger/*/flexi_logger/ -//! [syslog]: https://docs.rs/syslog/*/syslog/ -//! [slog-stdlog]: https://docs.rs/slog-stdlog/*/slog_stdlog/ -//! [log4rs]: https://docs.rs/log4rs/*/log4rs/ -//! [fern]: https://docs.rs/fern/*/fern/ - -#![doc( - html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://docs.rs/log/0.4.8" -)] -#![warn(missing_docs)] -#![deny(missing_debug_implementations)] -#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] -// When compiled for the rustc compiler itself we want to make sure that this is -// an unstable crate -#![cfg_attr(rustbuild, feature(staged_api, rustc_private))] -#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] - -#[cfg(all(not(feature = "std"), not(test)))] -extern crate core as std; - -#[macro_use] -extern crate cfg_if; - -use std::cmp; -#[cfg(feature = "std")] -use std::error; -use std::fmt; -use std::mem; -use std::str::FromStr; -use std::sync::atomic::{AtomicUsize, Ordering}; - -// FIXME: ATOMIC_USIZE_INIT was deprecated in rust 1.34. Silence the -// deprecation warning until our MSRV >= 1.24, where we can use the -// replacement const fn `AtomicUsize::new` -#[allow(deprecated)] -use std::sync::atomic::ATOMIC_USIZE_INIT; - -#[macro_use] -mod macros; -mod serde; - -#[cfg(feature = "kv_unstable")] -pub mod kv; - -// The LOGGER static holds a pointer to the global logger. It is protected by -// the STATE static which determines whether LOGGER has been initialized yet. -static mut LOGGER: &'static Log = &NopLogger; - -#[allow(deprecated)] -static STATE: AtomicUsize = ATOMIC_USIZE_INIT; - -// There are three different states that we care about: the logger's -// uninitialized, the logger's initializing (set_logger's been called but -// LOGGER hasn't actually been set yet), or the logger's active. 
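The comment above describes the three-state handshake (uninitialized → initializing → initialized) that guards the global `LOGGER`. A rough standalone sketch of that pattern (not the crate's actual internals; the helper name and the use of `compare_exchange` are illustrative only):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
const INITIALIZED: usize = 2;

static STATE: AtomicUsize = AtomicUsize::new(UNINITIALIZED);

// Hypothetical one-shot installer: only the first caller wins the
// UNINITIALIZED -> INITIALIZING transition; everyone else gets an error.
fn try_init_once(install: impl FnOnce()) -> Result<(), &'static str> {
    match STATE.compare_exchange(UNINITIALIZED, INITIALIZING, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(_) => {
            install(); // we own the INITIALIZING window, so this runs exactly once
            STATE.store(INITIALIZED, Ordering::SeqCst);
            Ok(())
        }
        Err(_) => Err("a logger was already set"),
    }
}

fn main() {
    assert!(try_init_once(|| println!("logger installed")).is_ok());
    assert!(try_init_once(|| unreachable!()).is_err());
}
```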
-const UNINITIALIZED: usize = 0; -const INITIALIZING: usize = 1; -const INITIALIZED: usize = 2; - -#[allow(deprecated)] -static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT; - -static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; - -static SET_LOGGER_ERROR: &'static str = "attempted to set a logger after the logging system \ - was already initialized"; -static LEVEL_PARSE_ERROR: &'static str = - "attempted to convert a string that doesn't match an existing log level"; - -/// An enum representing the available verbosity levels of the logger. -/// -/// Typical usage includes: checking if a certain `Level` is enabled with -/// [`log_enabled!`](macro.log_enabled.html), specifying the `Level` of -/// [`log!`](macro.log.html), and comparing a `Level` directly to a -/// [`LevelFilter`](enum.LevelFilter.html). -#[repr(usize)] -#[derive(Copy, Eq, Debug, Hash)] -pub enum Level { - /// The "error" level. - /// - /// Designates very serious errors. - Error = 1, // This way these line up with the discriminants for LevelFilter below - /// The "warn" level. - /// - /// Designates hazardous situations. - Warn, - /// The "info" level. - /// - /// Designates useful information. - Info, - /// The "debug" level. - /// - /// Designates lower priority information. - Debug, - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. - Trace, -} - -impl Clone for Level { - #[inline] - fn clone(&self) -> Level { - *self - } -} - -impl PartialEq for Level { - #[inline] - fn eq(&self, other: &Level) -> bool { - *self as usize == *other as usize - } -} - -impl PartialEq for Level { - #[inline] - fn eq(&self, other: &LevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialOrd for Level { - #[inline] - fn partial_cmp(&self, other: &Level) -> Option { - Some(self.cmp(other)) - } - - #[inline] - fn lt(&self, other: &Level) -> bool { - (*self as usize) < *other as usize - } - - #[inline] - fn le(&self, other: &Level) -> bool { - *self as usize <= *other as usize - } - - #[inline] - fn gt(&self, other: &Level) -> bool { - *self as usize > *other as usize - } - - #[inline] - fn ge(&self, other: &Level) -> bool { - *self as usize >= *other as usize - } -} - -impl PartialOrd for Level { - #[inline] - fn partial_cmp(&self, other: &LevelFilter) -> Option { - Some((*self as usize).cmp(&(*other as usize))) - } - - #[inline] - fn lt(&self, other: &LevelFilter) -> bool { - (*self as usize) < *other as usize - } - - #[inline] - fn le(&self, other: &LevelFilter) -> bool { - *self as usize <= *other as usize - } - - #[inline] - fn gt(&self, other: &LevelFilter) -> bool { - *self as usize > *other as usize - } - - #[inline] - fn ge(&self, other: &LevelFilter) -> bool { - *self as usize >= *other as usize - } -} - -impl Ord for Level { - #[inline] - fn cmp(&self, other: &Level) -> cmp::Ordering { - (*self as usize).cmp(&(*other as usize)) - } -} - -fn ok_or(t: Option, e: E) -> Result { - match t { - Some(t) => Ok(t), - None => Err(e), - } -} - -// Reimplemented here because std::ascii is not available in libcore -fn eq_ignore_ascii_case(a: &str, b: &str) -> bool { - fn to_ascii_uppercase(c: u8) -> u8 { - if c >= b'a' && c <= b'z' { - c - b'a' + b'A' - } else { - c - } - } - - if a.len() == b.len() { - a.bytes() - .zip(b.bytes()) - .all(|(a, b)| to_ascii_uppercase(a) == to_ascii_uppercase(b)) - } else { - false - } -} - -impl FromStr for Level { - type Err = ParseLevelError; - fn from_str(level: &str) -> Result { - 
ok_or( - LOG_LEVEL_NAMES - .iter() - .position(|&name| eq_ignore_ascii_case(name, level)) - .into_iter() - .filter(|&idx| idx != 0) - .map(|idx| Level::from_usize(idx).unwrap()) - .next(), - ParseLevelError(()), - ) - } -} - -impl fmt::Display for Level { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.pad(LOG_LEVEL_NAMES[*self as usize]) - } -} - -impl Level { - fn from_usize(u: usize) -> Option { - match u { - 1 => Some(Level::Error), - 2 => Some(Level::Warn), - 3 => Some(Level::Info), - 4 => Some(Level::Debug), - 5 => Some(Level::Trace), - _ => None, - } - } - - /// Returns the most verbose logging level. - #[inline] - pub fn max() -> Level { - Level::Trace - } - - /// Converts the `Level` to the equivalent `LevelFilter`. - #[inline] - pub fn to_level_filter(&self) -> LevelFilter { - LevelFilter::from_usize(*self as usize).unwrap() - } -} - -/// An enum representing the available verbosity level filters of the logger. -/// -/// A `LevelFilter` may be compared directly to a [`Level`]. Use this type -/// to get and set the maximum log level with [`max_level()`] and [`set_max_level`]. -/// -/// [`Level`]: enum.Level.html -/// [`max_level()`]: fn.max_level.html -/// [`set_max_level`]: fn.set_max_level.html -#[repr(usize)] -#[derive(Copy, Eq, Debug, Hash)] -pub enum LevelFilter { - /// A level lower than all log levels. - Off, - /// Corresponds to the `Error` log level. - Error, - /// Corresponds to the `Warn` log level. - Warn, - /// Corresponds to the `Info` log level. - Info, - /// Corresponds to the `Debug` log level. - Debug, - /// Corresponds to the `Trace` log level. - Trace, -} - -// Deriving generates terrible impls of these traits - -impl Clone for LevelFilter { - #[inline] - fn clone(&self) -> LevelFilter { - *self - } -} - -impl PartialEq for LevelFilter { - #[inline] - fn eq(&self, other: &LevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialEq for LevelFilter { - #[inline] - fn eq(&self, other: &Level) -> bool { - other.eq(self) - } -} - -impl PartialOrd for LevelFilter { - #[inline] - fn partial_cmp(&self, other: &LevelFilter) -> Option { - Some(self.cmp(other)) - } - - #[inline] - fn lt(&self, other: &LevelFilter) -> bool { - (*self as usize) < *other as usize - } - - #[inline] - fn le(&self, other: &LevelFilter) -> bool { - *self as usize <= *other as usize - } - - #[inline] - fn gt(&self, other: &LevelFilter) -> bool { - *self as usize > *other as usize - } - - #[inline] - fn ge(&self, other: &LevelFilter) -> bool { - *self as usize >= *other as usize - } -} - -impl PartialOrd for LevelFilter { - #[inline] - fn partial_cmp(&self, other: &Level) -> Option { - Some((*self as usize).cmp(&(*other as usize))) - } - - #[inline] - fn lt(&self, other: &Level) -> bool { - (*self as usize) < *other as usize - } - - #[inline] - fn le(&self, other: &Level) -> bool { - *self as usize <= *other as usize - } - - #[inline] - fn gt(&self, other: &Level) -> bool { - *self as usize > *other as usize - } - - #[inline] - fn ge(&self, other: &Level) -> bool { - *self as usize >= *other as usize - } -} - -impl Ord for LevelFilter { - #[inline] - fn cmp(&self, other: &LevelFilter) -> cmp::Ordering { - (*self as usize).cmp(&(*other as usize)) - } -} - -impl FromStr for LevelFilter { - type Err = ParseLevelError; - fn from_str(level: &str) -> Result { - ok_or( - LOG_LEVEL_NAMES - .iter() - .position(|&name| eq_ignore_ascii_case(name, level)) - .map(|p| LevelFilter::from_usize(p).unwrap()), - ParseLevelError(()), - ) - } -} - -impl fmt::Display for 
LevelFilter { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.pad(LOG_LEVEL_NAMES[*self as usize]) - } -} - -impl LevelFilter { - fn from_usize(u: usize) -> Option { - match u { - 0 => Some(LevelFilter::Off), - 1 => Some(LevelFilter::Error), - 2 => Some(LevelFilter::Warn), - 3 => Some(LevelFilter::Info), - 4 => Some(LevelFilter::Debug), - 5 => Some(LevelFilter::Trace), - _ => None, - } - } - /// Returns the most verbose logging level filter. - #[inline] - pub fn max() -> LevelFilter { - LevelFilter::Trace - } - - /// Converts `self` to the equivalent `Level`. - /// - /// Returns `None` if `self` is `LevelFilter::Off`. - #[inline] - pub fn to_level(&self) -> Option { - Level::from_usize(*self as usize) - } -} - -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -enum MaybeStaticStr<'a> { - Static(&'static str), - Borrowed(&'a str), -} - -impl<'a> MaybeStaticStr<'a> { - #[inline] - fn get(&self) -> &'a str { - match *self { - MaybeStaticStr::Static(s) => s, - MaybeStaticStr::Borrowed(s) => s, - } - } -} - -/// The "payload" of a log message. -/// -/// # Use -/// -/// `Record` structures are passed as parameters to the [`log`][method.log] -/// method of the [`Log`] trait. Logger implementors manipulate these -/// structures in order to display log messages. `Record`s are automatically -/// created by the [`log!`] macro and so are not seen by log users. -/// -/// Note that the [`level()`] and [`target()`] accessors are equivalent to -/// `self.metadata().level()` and `self.metadata().target()` respectively. -/// These methods are provided as a convenience for users of this structure. -/// -/// # Example -/// -/// The following example shows a simple logger that displays the level, -/// module path, and message of any `Record` that is passed to it. -/// -/// ```edition2018 -/// struct SimpleLogger; -/// -/// impl log::Log for SimpleLogger { -/// fn enabled(&self, metadata: &log::Metadata) -> bool { -/// true -/// } -/// -/// fn log(&self, record: &log::Record) { -/// if !self.enabled(record.metadata()) { -/// return; -/// } -/// -/// println!("{}:{} -- {}", -/// record.level(), -/// record.target(), -/// record.args()); -/// } -/// fn flush(&self) {} -/// } -/// ``` -/// -/// [method.log]: trait.Log.html#tymethod.log -/// [`Log`]: trait.Log.html -/// [`log!`]: macro.log.html -/// [`level()`]: struct.Record.html#method.level -/// [`target()`]: struct.Record.html#method.target -#[derive(Clone, Debug)] -pub struct Record<'a> { - metadata: Metadata<'a>, - args: fmt::Arguments<'a>, - module_path: Option>, - file: Option>, - line: Option, - #[cfg(feature = "kv_unstable")] - key_values: KeyValues<'a>, -} - -// This wrapper type is only needed so we can -// `#[derive(Debug)]` on `Record`. It also -// provides a useful `Debug` implementation for -// the underlying `Source`. -#[cfg(feature = "kv_unstable")] -#[derive(Clone)] -struct KeyValues<'a>(&'a kv::Source); - -#[cfg(feature = "kv_unstable")] -impl<'a> fmt::Debug for KeyValues<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut visitor = f.debug_map(); - self.0.visit(&mut visitor)?; - visitor.finish() - } -} - -impl<'a> Record<'a> { - /// Returns a new builder. - #[inline] - pub fn builder() -> RecordBuilder<'a> { - RecordBuilder::new() - } - - /// The message body. - #[inline] - pub fn args(&self) -> &fmt::Arguments<'a> { - &self.args - } - - /// Metadata about the log directive. 
- #[inline] - pub fn metadata(&self) -> &Metadata<'a> { - &self.metadata - } - - /// The verbosity level of the message. - #[inline] - pub fn level(&self) -> Level { - self.metadata.level() - } - - /// The name of the target of the directive. - #[inline] - pub fn target(&self) -> &'a str { - self.metadata.target() - } - - /// The module path of the message. - #[inline] - pub fn module_path(&self) -> Option<&'a str> { - self.module_path.map(|s| s.get()) - } - - /// The module path of the message, if it is a `'static` string. - #[inline] - pub fn module_path_static(&self) -> Option<&'static str> { - match self.module_path { - Some(MaybeStaticStr::Static(s)) => Some(s), - _ => None, - } - } - - /// The source file containing the message. - #[inline] - pub fn file(&self) -> Option<&'a str> { - self.file.map(|s| s.get()) - } - - /// The module path of the message, if it is a `'static` string. - #[inline] - pub fn file_static(&self) -> Option<&'static str> { - match self.file { - Some(MaybeStaticStr::Static(s)) => Some(s), - _ => None, - } - } - - /// The line containing the message. - #[inline] - pub fn line(&self) -> Option { - self.line - } - - /// The structued key-value pairs associated with the message. - #[cfg(feature = "kv_unstable")] - #[inline] - pub fn key_values(&self) -> &kv::Source { - self.key_values.0 - } - - /// Create a new [`Builder`](struct.Builder.html) based on this record. - #[cfg(feature = "kv_unstable")] - #[inline] - pub fn to_builder(&self) -> RecordBuilder { - RecordBuilder { - record: Record { - metadata: Metadata { - level: self.metadata.level, - target: self.metadata.target, - }, - args: self.args, - module_path: self.module_path, - file: self.file, - line: self.line, - key_values: self.key_values.clone(), - } - } - } -} - -/// Builder for [`Record`](struct.Record.html). -/// -/// Typically should only be used by log library creators or for testing and "shim loggers". -/// The `RecordBuilder` can set the different parameters of `Record` object, and returns -/// the created object when `build` is called. -/// -/// # Examples -/// -/// -/// ```edition2018 -/// use log::{Level, Record}; -/// -/// let record = Record::builder() -/// .args(format_args!("Error!")) -/// .level(Level::Error) -/// .target("myApp") -/// .file(Some("server.rs")) -/// .line(Some(144)) -/// .module_path(Some("server")) -/// .build(); -/// ``` -/// -/// Alternatively, use [`MetadataBuilder`](struct.MetadataBuilder.html): -/// -/// ```edition2018 -/// use log::{Record, Level, MetadataBuilder}; -/// -/// let error_metadata = MetadataBuilder::new() -/// .target("myApp") -/// .level(Level::Error) -/// .build(); -/// -/// let record = Record::builder() -/// .metadata(error_metadata) -/// .args(format_args!("Error!")) -/// .line(Some(433)) -/// .file(Some("app.rs")) -/// .module_path(Some("server")) -/// .build(); -/// ``` -#[derive(Debug)] -pub struct RecordBuilder<'a> { - record: Record<'a>, -} - -impl<'a> RecordBuilder<'a> { - /// Construct new `RecordBuilder`. 
- /// - /// The default options are: - /// - /// - `args`: [`format_args!("")`] - /// - `metadata`: [`Metadata::builder().build()`] - /// - `module_path`: `None` - /// - `file`: `None` - /// - `line`: `None` - /// - /// [`format_args!("")`]: https://doc.rust-lang.org/std/macro.format_args.html - /// [`Metadata::builder().build()`]: struct.MetadataBuilder.html#method.build - #[inline] - pub fn new() -> RecordBuilder<'a> { - #[cfg(feature = "kv_unstable")] - return RecordBuilder { - record: Record { - args: format_args!(""), - metadata: Metadata::builder().build(), - module_path: None, - file: None, - line: None, - key_values: KeyValues(&Option::None::<(kv::Key, kv::Value)>), - }, - }; - - #[cfg(not(feature = "kv_unstable"))] - return RecordBuilder { - record: Record { - args: format_args!(""), - metadata: Metadata::builder().build(), - module_path: None, - file: None, - line: None, - }, - }; - } - - /// Set [`args`](struct.Record.html#method.args). - #[inline] - pub fn args(&mut self, args: fmt::Arguments<'a>) -> &mut RecordBuilder<'a> { - self.record.args = args; - self - } - - /// Set [`metadata`](struct.Record.html#method.metadata). Construct a `Metadata` object with [`MetadataBuilder`](struct.MetadataBuilder.html). - #[inline] - pub fn metadata(&mut self, metadata: Metadata<'a>) -> &mut RecordBuilder<'a> { - self.record.metadata = metadata; - self - } - - /// Set [`Metadata::level`](struct.Metadata.html#method.level). - #[inline] - pub fn level(&mut self, level: Level) -> &mut RecordBuilder<'a> { - self.record.metadata.level = level; - self - } - - /// Set [`Metadata::target`](struct.Metadata.html#method.target) - #[inline] - pub fn target(&mut self, target: &'a str) -> &mut RecordBuilder<'a> { - self.record.metadata.target = target; - self - } - - /// Set [`module_path`](struct.Record.html#method.module_path) - #[inline] - pub fn module_path(&mut self, path: Option<&'a str>) -> &mut RecordBuilder<'a> { - self.record.module_path = path.map(MaybeStaticStr::Borrowed); - self - } - - /// Set [`module_path`](struct.Record.html#method.module_path) to a `'static` string - #[inline] - pub fn module_path_static(&mut self, path: Option<&'static str>) -> &mut RecordBuilder<'a> { - self.record.module_path = path.map(MaybeStaticStr::Static); - self - } - - /// Set [`file`](struct.Record.html#method.file) - #[inline] - pub fn file(&mut self, file: Option<&'a str>) -> &mut RecordBuilder<'a> { - self.record.file = file.map(MaybeStaticStr::Borrowed); - self - } - - /// Set [`file`](struct.Record.html#method.file) to a `'static` string. - #[inline] - pub fn file_static(&mut self, file: Option<&'static str>) -> &mut RecordBuilder<'a> { - self.record.file = file.map(MaybeStaticStr::Static); - self - } - - /// Set [`line`](struct.Record.html#method.line) - #[inline] - pub fn line(&mut self, line: Option) -> &mut RecordBuilder<'a> { - self.record.line = line; - self - } - - /// Set [`key_values`](struct.Record.html#method.key_values) - #[cfg(feature = "kv_unstable")] - #[inline] - pub fn key_values(&mut self, kvs: &'a kv::Source) -> &mut RecordBuilder<'a> { - self.record.key_values = KeyValues(kvs); - self - } - - /// Invoke the builder and return a `Record` - #[inline] - pub fn build(&self) -> Record<'a> { - self.record.clone() - } -} - -/// Metadata about a log message. -/// -/// # Use -/// -/// `Metadata` structs are created when users of the library use -/// logging macros. -/// -/// They are consumed by implementations of the `Log` trait in the -/// `enabled` method. 
-/// -/// `Record`s use `Metadata` to determine the log message's severity -/// and target. -/// -/// Users should use the `log_enabled!` macro in their code to avoid -/// constructing expensive log messages. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::{Record, Level, Metadata}; -/// -/// struct MyLogger; -/// -/// impl log::Log for MyLogger { -/// fn enabled(&self, metadata: &Metadata) -> bool { -/// metadata.level() <= Level::Info -/// } -/// -/// fn log(&self, record: &Record) { -/// if self.enabled(record.metadata()) { -/// println!("{} - {}", record.level(), record.args()); -/// } -/// } -/// fn flush(&self) {} -/// } -/// -/// # fn main(){} -/// ``` -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -pub struct Metadata<'a> { - level: Level, - target: &'a str, -} - -impl<'a> Metadata<'a> { - /// Returns a new builder. - #[inline] - pub fn builder() -> MetadataBuilder<'a> { - MetadataBuilder::new() - } - - /// The verbosity level of the message. - #[inline] - pub fn level(&self) -> Level { - self.level - } - - /// The name of the target of the directive. - #[inline] - pub fn target(&self) -> &'a str { - self.target - } -} - -/// Builder for [`Metadata`](struct.Metadata.html). -/// -/// Typically should only be used by log library creators or for testing and "shim loggers". -/// The `MetadataBuilder` can set the different parameters of a `Metadata` object, and returns -/// the created object when `build` is called. -/// -/// # Example -/// -/// ```edition2018 -/// let target = "myApp"; -/// use log::{Level, MetadataBuilder}; -/// let metadata = MetadataBuilder::new() -/// .level(Level::Debug) -/// .target(target) -/// .build(); -/// ``` -#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -pub struct MetadataBuilder<'a> { - metadata: Metadata<'a>, -} - -impl<'a> MetadataBuilder<'a> { - /// Construct a new `MetadataBuilder`. - /// - /// The default options are: - /// - /// - `level`: `Level::Info` - /// - `target`: `""` - #[inline] - pub fn new() -> MetadataBuilder<'a> { - MetadataBuilder { - metadata: Metadata { - level: Level::Info, - target: "", - }, - } - } - - /// Setter for [`level`](struct.Metadata.html#method.level). - #[inline] - pub fn level(&mut self, arg: Level) -> &mut MetadataBuilder<'a> { - self.metadata.level = arg; - self - } - - /// Setter for [`target`](struct.Metadata.html#method.target). - #[inline] - pub fn target(&mut self, target: &'a str) -> &mut MetadataBuilder<'a> { - self.metadata.target = target; - self - } - - /// Returns a `Metadata` object. - #[inline] - pub fn build(&self) -> Metadata<'a> { - self.metadata.clone() - } -} - -/// A trait encapsulating the operations required of a logger. -pub trait Log: Sync + Send { - /// Determines if a log message with the specified metadata would be - /// logged. - /// - /// This is used by the `log_enabled!` macro to allow callers to avoid - /// expensive computation of log message arguments if the message would be - /// discarded anyway. - fn enabled(&self, metadata: &Metadata) -> bool; - - /// Logs the `Record`. - /// - /// Note that `enabled` is *not* necessarily called before this method. - /// Implementations of `log` should perform all necessary filtering - /// internally. - fn log(&self, record: &Record); - - /// Flushes any buffered records. 
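The `Metadata` docs above recommend `log_enabled!` for avoiding the cost of building log messages that would be discarded anyway. A minimal usage sketch (the `expensive_summary` helper is made up for illustration):

```rust
use log::{debug, log_enabled, Level};

// Stand-in for something genuinely costly to compute.
fn expensive_summary() -> String {
    (0..1_000).map(|i| i.to_string()).collect::<Vec<_>>().join(",")
}

fn main() {
    // Only pay for expensive_summary() if some logger actually wants Debug records.
    if log_enabled!(Level::Debug) {
        debug!("state dump: {}", expensive_summary());
    }
}
```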
- fn flush(&self); -} - -// Just used as a dummy initial value for LOGGER -struct NopLogger; - -impl Log for NopLogger { - fn enabled(&self, _: &Metadata) -> bool { - false - } - - fn log(&self, _: &Record) {} - fn flush(&self) {} -} - -/// Sets the global maximum log level. -/// -/// Generally, this should only be called by the active logging implementation. -#[inline] -pub fn set_max_level(level: LevelFilter) { - MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst) -} - -/// Returns the current maximum log level. -/// -/// The [`log!`], [`error!`], [`warn!`], [`info!`], [`debug!`], and [`trace!`] macros check -/// this value and discard any message logged at a higher level. The maximum -/// log level is set by the [`set_max_level`] function. -/// -/// [`log!`]: macro.log.html -/// [`error!`]: macro.error.html -/// [`warn!`]: macro.warn.html -/// [`info!`]: macro.info.html -/// [`debug!`]: macro.debug.html -/// [`trace!`]: macro.trace.html -/// [`set_max_level`]: fn.set_max_level.html -#[inline(always)] -pub fn max_level() -> LevelFilter { - unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) } -} - -/// Sets the global logger to a `Box`. -/// -/// This is a simple convenience wrapper over `set_logger`, which takes a -/// `Box` rather than a `&'static Log`. See the documentation for -/// [`set_logger`] for more details. -/// -/// Requires the `std` feature. -/// -/// # Errors -/// -/// An error is returned if a logger has already been set. -/// -/// [`set_logger`]: fn.set_logger.html -#[cfg(all(feature = "std", atomic_cas))] -pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { - set_logger_inner(|| unsafe { &*Box::into_raw(logger) }) -} - -/// Sets the global logger to a `&'static Log`. -/// -/// This function may only be called once in the lifetime of a program. Any log -/// events that occur before the call to `set_logger` completes will be ignored. -/// -/// This function does not typically need to be called manually. Logger -/// implementations should provide an initialization method that installs the -/// logger internally. -/// -/// # Availability -/// -/// This method is available even when the `std` feature is disabled. However, -/// it is currently unavailable on `thumbv6` targets, which lack support for -/// some atomic operations which are used by this function. Even on those -/// targets, [`set_logger_racy`] will be available. -/// -/// # Errors -/// -/// An error is returned if a logger has already been set. 
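Per the `max_level`/`set_max_level` docs above (and the `STATIC_MAX_LEVEL` docs further on), a message is only emitted if it clears both the compile-time ceiling and the runtime maximum level. A hypothetical helper approximating that check:

```rust
use log::{Level, STATIC_MAX_LEVEL};

// Rough equivalent of the guard the logging macros apply:
// first the compile-time ceiling, then the runtime maximum level.
fn would_log(level: Level) -> bool {
    level <= STATIC_MAX_LEVEL && level <= log::max_level()
}

fn main() {
    // With no logger installed, max_level() defaults to Off,
    // so nothing would be emitted yet.
    println!("TRACE passes the filters? {}", would_log(Level::Trace));
}
```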
-/// -/// # Examples -/// -/// ```edition2018 -/// use log::{error, info, warn, Record, Level, Metadata, LevelFilter}; -/// -/// static MY_LOGGER: MyLogger = MyLogger; -/// -/// struct MyLogger; -/// -/// impl log::Log for MyLogger { -/// fn enabled(&self, metadata: &Metadata) -> bool { -/// metadata.level() <= Level::Info -/// } -/// -/// fn log(&self, record: &Record) { -/// if self.enabled(record.metadata()) { -/// println!("{} - {}", record.level(), record.args()); -/// } -/// } -/// fn flush(&self) {} -/// } -/// -/// # fn main(){ -/// log::set_logger(&MY_LOGGER).unwrap(); -/// log::set_max_level(LevelFilter::Info); -/// -/// info!("hello log"); -/// warn!("warning"); -/// error!("oops"); -/// # } -/// ``` -/// -/// [`set_logger_racy`]: fn.set_logger_racy.html -#[cfg(atomic_cas)] -pub fn set_logger(logger: &'static Log) -> Result<(), SetLoggerError> { - set_logger_inner(|| logger) -} - -#[cfg(atomic_cas)] -fn set_logger_inner(make_logger: F) -> Result<(), SetLoggerError> -where - F: FnOnce() -> &'static Log, -{ - unsafe { - match STATE.compare_and_swap(UNINITIALIZED, INITIALIZING, Ordering::SeqCst) { - UNINITIALIZED => { - LOGGER = make_logger(); - STATE.store(INITIALIZED, Ordering::SeqCst); - Ok(()) - } - INITIALIZING => { - while STATE.load(Ordering::SeqCst) == INITIALIZING {} - Err(SetLoggerError(())) - } - _ => Err(SetLoggerError(())), - } - } -} - -/// A thread-unsafe version of [`set_logger`]. -/// -/// This function is available on all platforms, even those that do not have -/// support for atomics that is needed by [`set_logger`]. -/// -/// In almost all cases, [`set_logger`] should be preferred. -/// -/// # Safety -/// -/// This function is only safe to call when no other logger initialization -/// function is called while this function still executes. -/// -/// This can be upheld by (for example) making sure that **there are no other -/// threads**, and (on embedded) that **interrupts are disabled**. -/// -/// It is safe to use other logging functions while this function runs -/// (including all logging macros). -/// -/// [`set_logger`]: fn.set_logger.html -pub unsafe fn set_logger_racy(logger: &'static Log) -> Result<(), SetLoggerError> { - match STATE.load(Ordering::SeqCst) { - UNINITIALIZED => { - LOGGER = logger; - STATE.store(INITIALIZED, Ordering::SeqCst); - Ok(()) - } - INITIALIZING => { - // This is just plain UB, since we were racing another initialization function - unreachable!("set_logger_racy must not be used with other initialization functions") - } - _ => Err(SetLoggerError(())), - } -} - -/// The type returned by [`set_logger`] if [`set_logger`] has already been called. -/// -/// [`set_logger`]: fn.set_logger.html -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub struct SetLoggerError(()); - -impl fmt::Display for SetLoggerError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(SET_LOGGER_ERROR) - } -} - -// The Error trait is not available in libcore -#[cfg(feature = "std")] -impl error::Error for SetLoggerError { - fn description(&self) -> &str { - SET_LOGGER_ERROR - } -} - -/// The type returned by [`from_str`] when the string doesn't match any of the log levels. 
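`ParseLevelError` is what the `FromStr` impls shown earlier return on unrecognized input; matching is ASCII-case-insensitive, and `"off"` is accepted for `LevelFilter` but rejected for `Level`. For example:

```rust
use log::{Level, LevelFilter};
use std::str::FromStr;

fn main() {
    // Matching is ASCII-case-insensitive.
    assert_eq!(Level::from_str("InFo").unwrap(), Level::Info);
    assert_eq!("warn".parse::<LevelFilter>().unwrap(), LevelFilter::Warn);

    // "off" names a filter, not a level, so Level::from_str rejects it.
    assert!("off".parse::<Level>().is_err());
    assert_eq!("off".parse::<LevelFilter>().unwrap(), LevelFilter::Off);
}
```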
-/// -/// [`from_str`]: https://doc.rust-lang.org/std/str/trait.FromStr.html#tymethod.from_str -#[allow(missing_copy_implementations)] -#[derive(Debug, PartialEq)] -pub struct ParseLevelError(()); - -impl fmt::Display for ParseLevelError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(LEVEL_PARSE_ERROR) - } -} - -// The Error trait is not available in libcore -#[cfg(feature = "std")] -impl error::Error for ParseLevelError { - fn description(&self) -> &str { - LEVEL_PARSE_ERROR - } -} - -/// Returns a reference to the logger. -/// -/// If a logger has not been set, a no-op implementation is returned. -pub fn logger() -> &'static Log { - unsafe { - if STATE.load(Ordering::SeqCst) != INITIALIZED { - static NOP: NopLogger = NopLogger; - &NOP - } else { - LOGGER - } - } -} - -// WARNING: this is not part of the crate's public API and is subject to change at any time -#[doc(hidden)] -pub fn __private_api_log( - args: fmt::Arguments, - level: Level, - &(target, module_path, file, line): &(&str, &'static str, &'static str, u32), -) { - logger().log( - &Record::builder() - .args(args) - .level(level) - .target(target) - .module_path_static(Some(module_path)) - .file_static(Some(file)) - .line(Some(line)) - .build(), - ); -} - -// WARNING: this is not part of the crate's public API and is subject to change at any time -#[doc(hidden)] -pub fn __private_api_enabled(level: Level, target: &str) -> bool { - logger().enabled(&Metadata::builder().level(level).target(target).build()) -} - -/// The statically resolved maximum log level. -/// -/// See the crate level documentation for information on how to configure this. -/// -/// This value is checked by the log macros, but not by the `Log`ger returned by -/// the [`logger`] function. Code that manually calls functions on that value -/// should compare the level against this value. -/// -/// [`logger`]: fn.logger.html -pub const STATIC_MAX_LEVEL: LevelFilter = MAX_LEVEL_INNER; - -cfg_if! 
{ - if #[cfg(all(not(debug_assertions), feature = "release_max_level_off"))] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Off; - } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_error"))] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Error; - } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_warn"))] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Warn; - } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_info"))] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Info; - } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_debug"))] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Debug; - } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_trace"))] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Trace; - } else if #[cfg(feature = "max_level_off")] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Off; - } else if #[cfg(feature = "max_level_error")] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Error; - } else if #[cfg(feature = "max_level_warn")] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Warn; - } else if #[cfg(feature = "max_level_info")] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Info; - } else if #[cfg(feature = "max_level_debug")] { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Debug; - } else { - const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Trace; - } -} - -#[cfg(test)] -mod tests { - extern crate std; - use super::{Level, LevelFilter, ParseLevelError}; - use tests::std::string::ToString; - - #[test] - fn test_levelfilter_from_str() { - let tests = [ - ("off", Ok(LevelFilter::Off)), - ("error", Ok(LevelFilter::Error)), - ("warn", Ok(LevelFilter::Warn)), - ("info", Ok(LevelFilter::Info)), - ("debug", Ok(LevelFilter::Debug)), - ("trace", Ok(LevelFilter::Trace)), - ("OFF", Ok(LevelFilter::Off)), - ("ERROR", Ok(LevelFilter::Error)), - ("WARN", Ok(LevelFilter::Warn)), - ("INFO", Ok(LevelFilter::Info)), - ("DEBUG", Ok(LevelFilter::Debug)), - ("TRACE", Ok(LevelFilter::Trace)), - ("asdf", Err(ParseLevelError(()))), - ]; - for &(s, ref expected) in &tests { - assert_eq!(expected, &s.parse()); - } - } - - #[test] - fn test_level_from_str() { - let tests = [ - ("OFF", Err(ParseLevelError(()))), - ("error", Ok(Level::Error)), - ("warn", Ok(Level::Warn)), - ("info", Ok(Level::Info)), - ("debug", Ok(Level::Debug)), - ("trace", Ok(Level::Trace)), - ("ERROR", Ok(Level::Error)), - ("WARN", Ok(Level::Warn)), - ("INFO", Ok(Level::Info)), - ("DEBUG", Ok(Level::Debug)), - ("TRACE", Ok(Level::Trace)), - ("asdf", Err(ParseLevelError(()))), - ]; - for &(s, ref expected) in &tests { - assert_eq!(expected, &s.parse()); - } - } - - #[test] - fn test_level_show() { - assert_eq!("INFO", Level::Info.to_string()); - assert_eq!("ERROR", Level::Error.to_string()); - } - - #[test] - fn test_levelfilter_show() { - assert_eq!("OFF", LevelFilter::Off.to_string()); - assert_eq!("ERROR", LevelFilter::Error.to_string()); - } - - #[test] - fn test_cross_cmp() { - assert!(Level::Debug > LevelFilter::Error); - assert!(LevelFilter::Warn < Level::Trace); - assert!(LevelFilter::Off < Level::Error); - } - - #[test] - fn test_cross_eq() { - assert!(Level::Error == LevelFilter::Error); - assert!(LevelFilter::Off != Level::Error); - assert!(Level::Trace == LevelFilter::Trace); - } - - #[test] - fn test_to_level() { - assert_eq!(Some(Level::Error), LevelFilter::Error.to_level()); - assert_eq!(None, LevelFilter::Off.to_level()); - 
assert_eq!(Some(Level::Debug), LevelFilter::Debug.to_level()); - } - - #[test] - fn test_to_level_filter() { - assert_eq!(LevelFilter::Error, Level::Error.to_level_filter()); - assert_eq!(LevelFilter::Trace, Level::Trace.to_level_filter()); - } - - #[test] - #[cfg(feature = "std")] - fn test_error_trait() { - use super::SetLoggerError; - use std::error::Error; - let e = SetLoggerError(()); - assert_eq!( - e.description(), - "attempted to set a logger after the logging system \ - was already initialized" - ); - } - - #[test] - fn test_metadata_builder() { - use super::MetadataBuilder; - let target = "myApp"; - let metadata_test = MetadataBuilder::new() - .level(Level::Debug) - .target(target) - .build(); - assert_eq!(metadata_test.level(), Level::Debug); - assert_eq!(metadata_test.target(), "myApp"); - } - - #[test] - fn test_metadata_convenience_builder() { - use super::Metadata; - let target = "myApp"; - let metadata_test = Metadata::builder() - .level(Level::Debug) - .target(target) - .build(); - assert_eq!(metadata_test.level(), Level::Debug); - assert_eq!(metadata_test.target(), "myApp"); - } - - #[test] - fn test_record_builder() { - use super::{MetadataBuilder, RecordBuilder}; - let target = "myApp"; - let metadata = MetadataBuilder::new().target(target).build(); - let fmt_args = format_args!("hello"); - let record_test = RecordBuilder::new() - .args(fmt_args) - .metadata(metadata) - .module_path(Some("foo")) - .file(Some("bar")) - .line(Some(30)) - .build(); - assert_eq!(record_test.metadata().target(), "myApp"); - assert_eq!(record_test.module_path(), Some("foo")); - assert_eq!(record_test.file(), Some("bar")); - assert_eq!(record_test.line(), Some(30)); - } - - #[test] - fn test_record_convenience_builder() { - use super::{Metadata, Record}; - let target = "myApp"; - let metadata = Metadata::builder().target(target).build(); - let fmt_args = format_args!("hello"); - let record_test = Record::builder() - .args(fmt_args) - .metadata(metadata) - .module_path(Some("foo")) - .file(Some("bar")) - .line(Some(30)) - .build(); - assert_eq!(record_test.target(), "myApp"); - assert_eq!(record_test.module_path(), Some("foo")); - assert_eq!(record_test.file(), Some("bar")); - assert_eq!(record_test.line(), Some(30)); - } - - #[test] - fn test_record_complete_builder() { - use super::{Level, Record}; - let target = "myApp"; - let record_test = Record::builder() - .module_path(Some("foo")) - .file(Some("bar")) - .line(Some(30)) - .target(target) - .level(Level::Error) - .build(); - assert_eq!(record_test.target(), "myApp"); - assert_eq!(record_test.level(), Level::Error); - assert_eq!(record_test.module_path(), Some("foo")); - assert_eq!(record_test.file(), Some("bar")); - assert_eq!(record_test.line(), Some(30)); - } - - #[test] - #[cfg(feature = "kv_unstable")] - fn test_record_key_values_builder() { - use super::Record; - use kv::{self, Visitor}; - - struct TestVisitor { - seen_pairs: usize, - } - - impl<'kvs> Visitor<'kvs> for TestVisitor { - fn visit_pair( - &mut self, - _: kv::Key<'kvs>, - _: kv::Value<'kvs> - ) -> Result<(), kv::Error> { - self.seen_pairs += 1; - Ok(()) - } - } - - let kvs: &[(&str, i32)] = &[ - ("a", 1), - ("b", 2) - ]; - let record_test = Record::builder() - .key_values(&kvs) - .build(); - - let mut visitor = TestVisitor { - seen_pairs: 0, - }; - - record_test.key_values().visit(&mut visitor).unwrap(); - - assert_eq!(2, visitor.seen_pairs); - } -} +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A lightweight logging facade. +//! +//! The `log` crate provides a single logging API that abstracts over the +//! actual logging implementation. Libraries can use the logging API provided +//! by this crate, and the consumer of those libraries can choose the logging +//! implementation that is most suitable for its use case. +//! +//! If no logging implementation is selected, the facade falls back to a "noop" +//! implementation that ignores all log messages. The overhead in this case +//! is very small - just an integer load, comparison and jump. +//! +//! A log request consists of a _target_, a _level_, and a _body_. A target is a +//! string which defaults to the module path of the location of the log request, +//! though that default may be overridden. Logger implementations typically use +//! the target to filter requests based on some user configuration. +//! +//! # Use +//! +//! The basic use of the log crate is through the five logging macros: [`error!`], +//! [`warn!`], [`info!`], [`debug!`] and [`trace!`] +//! where `error!` represents the highest-priority log messages +//! and `trace!` the lowest. The log messages are filtered by configuring +//! the log level to exclude messages with a lower priority. +//! Each of these macros accept format strings similarly to [`println!`]. +//! +//! +//! [`error!`]: ./macro.error.html +//! [`warn!`]: ./macro.warn.html +//! [`info!`]: ./macro.info.html +//! [`debug!`]: ./macro.debug.html +//! [`trace!`]: ./macro.trace.html +//! [`println!`]: https://doc.rust-lang.org/stable/std/macro.println.html +//! +//! ## In libraries +//! +//! Libraries should link only to the `log` crate, and use the provided +//! macros to log whatever information will be useful to downstream consumers. +//! +//! ### Examples +//! +//! ```edition2018 +//! # #[derive(Debug)] pub struct Yak(String); +//! # impl Yak { fn shave(&mut self, _: u32) {} } +//! # fn find_a_razor() -> Result { Ok(1) } +//! use log::{info, warn}; +//! +//! pub fn shave_the_yak(yak: &mut Yak) { +//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak); +//! +//! loop { +//! match find_a_razor() { +//! Ok(razor) => { +//! info!("Razor located: {}", razor); +//! yak.shave(razor); +//! break; +//! } +//! Err(err) => { +//! warn!("Unable to locate a razor: {}, retrying", err); +//! } +//! } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! ## In executables +//! +//! Executables should choose a logging implementation and initialize it early in the +//! runtime of the program. Logging implementations will typically include a +//! function to do this. Any log messages generated before +//! the implementation is initialized will be ignored. +//! +//! The executable itself may use the `log` crate to log as well. +//! +//! ### Warning +//! +//! The logging system may only be initialized once. +//! +//! # Available logging implementations +//! +//! In order to produce log output executables have to use +//! a logger implementation compatible with the facade. +//! There are many available implementations to choose from, +//! here are some of the most popular ones: +//! +//! * Simple minimal loggers: +//! * [env_logger] +//! * [simple_logger] +//! * [simplelog] +//! 
* [pretty_env_logger] +//! * [stderrlog] +//! * [flexi_logger] +//! * Complex configurable frameworks: +//! * [log4rs] +//! * [fern] +//! * Adaptors for other facilities: +//! * [syslog] +//! * [slog-stdlog] +//! +//! # Implementing a Logger +//! +//! Loggers implement the [`Log`] trait. Here's a very basic example that simply +//! logs all messages at the [`Error`][level_link], [`Warn`][level_link] or +//! [`Info`][level_link] levels to stdout: +//! +//! ```edition2018 +//! use log::{Record, Level, Metadata}; +//! +//! struct SimpleLogger; +//! +//! impl log::Log for SimpleLogger { +//! fn enabled(&self, metadata: &Metadata) -> bool { +//! metadata.level() <= Level::Info +//! } +//! +//! fn log(&self, record: &Record) { +//! if self.enabled(record.metadata()) { +//! println!("{} - {}", record.level(), record.args()); +//! } +//! } +//! +//! fn flush(&self) {} +//! } +//! +//! # fn main() {} +//! ``` +//! +//! Loggers are installed by calling the [`set_logger`] function. The maximum +//! log level also needs to be adjusted via the [`set_max_level`] function. The +//! logging facade uses this as an optimization to improve performance of log +//! messages at levels that are disabled. It's important to set it, as it +//! defaults to [`Off`][filter_link], so no log messages will ever be captured! +//! In the case of our example logger, we'll want to set the maximum log level +//! to [`Info`][filter_link], since we ignore any [`Debug`][level_link] or +//! [`Trace`][level_link] level log messages. A logging implementation should +//! provide a function that wraps a call to [`set_logger`] and +//! [`set_max_level`], handling initialization of the logger: +//! +//! ```edition2018 +//! # use log::{Level, Metadata}; +//! # struct SimpleLogger; +//! # impl log::Log for SimpleLogger { +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn log(&self, _: &log::Record) {} +//! # fn flush(&self) {} +//! # } +//! # fn main() {} +//! use log::{SetLoggerError, LevelFilter}; +//! +//! static LOGGER: SimpleLogger = SimpleLogger; +//! +//! pub fn init() -> Result<(), SetLoggerError> { +//! log::set_logger(&LOGGER) +//! .map(|()| log::set_max_level(LevelFilter::Info)) +//! } +//! ``` +//! +//! Implementations that adjust their configurations at runtime should take care +//! to adjust the maximum log level as well. +//! +//! # Use with `std` +//! +//! `set_logger` requires you to provide a `&'static Log`, which can be hard to +//! obtain if your logger depends on some runtime configuration. The +//! `set_boxed_logger` function is available with the `std` Cargo feature. It is +//! identical to `set_logger` except that it takes a `Box` rather than a +//! `&'static Log`: +//! +//! ```edition2018 +//! # use log::{Level, LevelFilter, Log, SetLoggerError, Metadata}; +//! # struct SimpleLogger; +//! # impl log::Log for SimpleLogger { +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn log(&self, _: &log::Record) {} +//! # fn flush(&self) {} +//! # } +//! # fn main() {} +//! # #[cfg(feature = "std")] +//! pub fn init() -> Result<(), SetLoggerError> { +//! log::set_boxed_logger(Box::new(SimpleLogger)) +//! .map(|()| log::set_max_level(LevelFilter::Info)) +//! } +//! ``` +//! +//! # Compile time filters +//! +//! Log levels can be statically disabled at compile time via Cargo features. Log invocations at +//! disabled levels will be skipped and will not even be present in the resulting binary. +//! This level is configured separately for release and debug builds. The features are: +//! +//! 
* `max_level_off` +//! * `max_level_error` +//! * `max_level_warn` +//! * `max_level_info` +//! * `max_level_debug` +//! * `max_level_trace` +//! * `release_max_level_off` +//! * `release_max_level_error` +//! * `release_max_level_warn` +//! * `release_max_level_info` +//! * `release_max_level_debug` +//! * `release_max_level_trace` +//! +//! These features control the value of the `STATIC_MAX_LEVEL` constant. The logging macros check +//! this value before logging a message. By default, no levels are disabled. +//! +//! Libraries should avoid using the max level features because they're global and can't be changed +//! once they're set. +//! +//! For example, a crate can disable trace level logs in debug builds and trace, debug, and info +//! level logs in release builds with the following configuration: +//! +//! ```toml +//! [dependencies] +//! log = { version = "0.4", features = ["max_level_debug", "release_max_level_warn"] } +//! ``` +//! # Crate Feature Flags +//! +//! The following crate feature flags are available in addition to the filters. They are +//! configured in your `Cargo.toml`. +//! +//! * `std` allows use of `std` crate instead of the default `core`. Enables using `std::error` and +//! `set_boxed_logger` functionality. +//! * `serde` enables support for serialization and deserialization of `Level` and `LevelFilter`. +//! +//! ```toml +//! [dependencies] +//! log = { version = "0.4", features = ["std", "serde"] } +//! ``` +//! +//! # Version compatibility +//! +//! The 0.3 and 0.4 versions of the `log` crate are almost entirely compatible. Log messages +//! made using `log` 0.3 will forward transparently to a logger implementation using `log` 0.4. Log +//! messages made using `log` 0.4 will forward to a logger implementation using `log` 0.3, but the +//! module path and file name information associated with the message will unfortunately be lost. +//! +//! [`Log`]: trait.Log.html +//! [level_link]: enum.Level.html +//! [filter_link]: enum.LevelFilter.html +//! [`set_logger`]: fn.set_logger.html +//! [`set_max_level`]: fn.set_max_level.html +//! [`try_set_logger_raw`]: fn.try_set_logger_raw.html +//! [`shutdown_logger_raw`]: fn.shutdown_logger_raw.html +//! [env_logger]: https://docs.rs/env_logger/*/env_logger/ +//! [simple_logger]: https://github.com/borntyping/rust-simple_logger +//! [simplelog]: https://github.com/drakulix/simplelog.rs +//! [pretty_env_logger]: https://docs.rs/pretty_env_logger/*/pretty_env_logger/ +//! [stderrlog]: https://docs.rs/stderrlog/*/stderrlog/ +//! [flexi_logger]: https://docs.rs/flexi_logger/*/flexi_logger/ +//! [syslog]: https://docs.rs/syslog/*/syslog/ +//! [slog-stdlog]: https://docs.rs/slog-stdlog/*/slog_stdlog/ +//! [log4rs]: https://docs.rs/log4rs/*/log4rs/ +//! 
[fern]: https://docs.rs/fern/*/fern/ + +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://docs.rs/log/0.4.11" +)] +#![warn(missing_docs)] +#![deny(missing_debug_implementations)] +#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] +// When compiled for the rustc compiler itself we want to make sure that this is +// an unstable crate +#![cfg_attr(rustbuild, feature(staged_api, rustc_private))] +#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] + +#[cfg(all(not(feature = "std"), not(test)))] +extern crate core as std; + +#[macro_use] +extern crate cfg_if; + +use std::cmp; +#[cfg(feature = "std")] +use std::error; +use std::fmt; +use std::mem; +use std::str::FromStr; +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[macro_use] +mod macros; +mod serde; + +#[cfg(feature = "kv_unstable")] +pub mod kv; + +// The LOGGER static holds a pointer to the global logger. It is protected by +// the STATE static which determines whether LOGGER has been initialized yet. +static mut LOGGER: &dyn Log = &NopLogger; + +static STATE: AtomicUsize = AtomicUsize::new(0); + +// There are three different states that we care about: the logger's +// uninitialized, the logger's initializing (set_logger's been called but +// LOGGER hasn't actually been set yet), or the logger's active. +const UNINITIALIZED: usize = 0; +const INITIALIZING: usize = 1; +const INITIALIZED: usize = 2; + +static MAX_LOG_LEVEL_FILTER: AtomicUsize = AtomicUsize::new(0); + +static LOG_LEVEL_NAMES: [&str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; + +static SET_LOGGER_ERROR: &str = "attempted to set a logger after the logging system \ + was already initialized"; +static LEVEL_PARSE_ERROR: &str = + "attempted to convert a string that doesn't match an existing log level"; + +/// An enum representing the available verbosity levels of the logger. +/// +/// Typical usage includes: checking if a certain `Level` is enabled with +/// [`log_enabled!`](macro.log_enabled.html), specifying the `Level` of +/// [`log!`](macro.log.html), and comparing a `Level` directly to a +/// [`LevelFilter`](enum.LevelFilter.html). +#[repr(usize)] +#[derive(Copy, Eq, Debug, Hash)] +pub enum Level { + /// The "error" level. + /// + /// Designates very serious errors. + // This way these line up with the discriminants for LevelFilter below + // This works because Rust treats field-less enums the same way as C does: + // https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-field-less-enumerations + Error = 1, + /// The "warn" level. + /// + /// Designates hazardous situations. + Warn, + /// The "info" level. + /// + /// Designates useful information. + Info, + /// The "debug" level. + /// + /// Designates lower priority information. + Debug, + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. 
+ Trace, +} + +impl Clone for Level { + #[inline] + fn clone(&self) -> Level { + *self + } +} + +impl PartialEq for Level { + #[inline] + fn eq(&self, other: &Level) -> bool { + *self as usize == *other as usize + } +} + +impl PartialEq for Level { + #[inline] + fn eq(&self, other: &LevelFilter) -> bool { + *self as usize == *other as usize + } +} + +impl PartialOrd for Level { + #[inline] + fn partial_cmp(&self, other: &Level) -> Option { + Some(self.cmp(other)) + } + + #[inline] + fn lt(&self, other: &Level) -> bool { + (*self as usize) < *other as usize + } + + #[inline] + fn le(&self, other: &Level) -> bool { + *self as usize <= *other as usize + } + + #[inline] + fn gt(&self, other: &Level) -> bool { + *self as usize > *other as usize + } + + #[inline] + fn ge(&self, other: &Level) -> bool { + *self as usize >= *other as usize + } +} + +impl PartialOrd for Level { + #[inline] + fn partial_cmp(&self, other: &LevelFilter) -> Option { + Some((*self as usize).cmp(&(*other as usize))) + } + + #[inline] + fn lt(&self, other: &LevelFilter) -> bool { + (*self as usize) < *other as usize + } + + #[inline] + fn le(&self, other: &LevelFilter) -> bool { + *self as usize <= *other as usize + } + + #[inline] + fn gt(&self, other: &LevelFilter) -> bool { + *self as usize > *other as usize + } + + #[inline] + fn ge(&self, other: &LevelFilter) -> bool { + *self as usize >= *other as usize + } +} + +impl Ord for Level { + #[inline] + fn cmp(&self, other: &Level) -> cmp::Ordering { + (*self as usize).cmp(&(*other as usize)) + } +} + +fn ok_or(t: Option, e: E) -> Result { + match t { + Some(t) => Ok(t), + None => Err(e), + } +} + +// Reimplemented here because std::ascii is not available in libcore +fn eq_ignore_ascii_case(a: &str, b: &str) -> bool { + fn to_ascii_uppercase(c: u8) -> u8 { + if c >= b'a' && c <= b'z' { + c - b'a' + b'A' + } else { + c + } + } + + if a.len() == b.len() { + a.bytes() + .zip(b.bytes()) + .all(|(a, b)| to_ascii_uppercase(a) == to_ascii_uppercase(b)) + } else { + false + } +} + +impl FromStr for Level { + type Err = ParseLevelError; + fn from_str(level: &str) -> Result { + ok_or( + LOG_LEVEL_NAMES + .iter() + .position(|&name| eq_ignore_ascii_case(name, level)) + .into_iter() + .filter(|&idx| idx != 0) + .map(|idx| Level::from_usize(idx).unwrap()) + .next(), + ParseLevelError(()), + ) + } +} + +impl fmt::Display for Level { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.pad(LOG_LEVEL_NAMES[*self as usize]) + } +} + +impl Level { + fn from_usize(u: usize) -> Option { + match u { + 1 => Some(Level::Error), + 2 => Some(Level::Warn), + 3 => Some(Level::Info), + 4 => Some(Level::Debug), + 5 => Some(Level::Trace), + _ => None, + } + } + + /// Returns the most verbose logging level. + #[inline] + pub fn max() -> Level { + Level::Trace + } + + /// Converts the `Level` to the equivalent `LevelFilter`. + #[inline] + pub fn to_level_filter(&self) -> LevelFilter { + LevelFilter::from_usize(*self as usize).unwrap() + } +} + +/// An enum representing the available verbosity level filters of the logger. +/// +/// A `LevelFilter` may be compared directly to a [`Level`]. Use this type +/// to get and set the maximum log level with [`max_level()`] and [`set_max_level`]. +/// +/// [`Level`]: enum.Level.html +/// [`max_level()`]: fn.max_level.html +/// [`set_max_level`]: fn.set_max_level.html +#[repr(usize)] +#[derive(Copy, Eq, Debug, Hash)] +pub enum LevelFilter { + /// A level lower than all log levels. + Off, + /// Corresponds to the `Error` log level. 
+ Error, + /// Corresponds to the `Warn` log level. + Warn, + /// Corresponds to the `Info` log level. + Info, + /// Corresponds to the `Debug` log level. + Debug, + /// Corresponds to the `Trace` log level. + Trace, +} + +// Deriving generates terrible impls of these traits + +impl Clone for LevelFilter { + #[inline] + fn clone(&self) -> LevelFilter { + *self + } +} + +impl PartialEq for LevelFilter { + #[inline] + fn eq(&self, other: &LevelFilter) -> bool { + *self as usize == *other as usize + } +} + +impl PartialEq for LevelFilter { + #[inline] + fn eq(&self, other: &Level) -> bool { + other.eq(self) + } +} + +impl PartialOrd for LevelFilter { + #[inline] + fn partial_cmp(&self, other: &LevelFilter) -> Option { + Some(self.cmp(other)) + } + + #[inline] + fn lt(&self, other: &LevelFilter) -> bool { + (*self as usize) < *other as usize + } + + #[inline] + fn le(&self, other: &LevelFilter) -> bool { + *self as usize <= *other as usize + } + + #[inline] + fn gt(&self, other: &LevelFilter) -> bool { + *self as usize > *other as usize + } + + #[inline] + fn ge(&self, other: &LevelFilter) -> bool { + *self as usize >= *other as usize + } +} + +impl PartialOrd for LevelFilter { + #[inline] + fn partial_cmp(&self, other: &Level) -> Option { + Some((*self as usize).cmp(&(*other as usize))) + } + + #[inline] + fn lt(&self, other: &Level) -> bool { + (*self as usize) < *other as usize + } + + #[inline] + fn le(&self, other: &Level) -> bool { + *self as usize <= *other as usize + } + + #[inline] + fn gt(&self, other: &Level) -> bool { + *self as usize > *other as usize + } + + #[inline] + fn ge(&self, other: &Level) -> bool { + *self as usize >= *other as usize + } +} + +impl Ord for LevelFilter { + #[inline] + fn cmp(&self, other: &LevelFilter) -> cmp::Ordering { + (*self as usize).cmp(&(*other as usize)) + } +} + +impl FromStr for LevelFilter { + type Err = ParseLevelError; + fn from_str(level: &str) -> Result { + ok_or( + LOG_LEVEL_NAMES + .iter() + .position(|&name| eq_ignore_ascii_case(name, level)) + .map(|p| LevelFilter::from_usize(p).unwrap()), + ParseLevelError(()), + ) + } +} + +impl fmt::Display for LevelFilter { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.pad(LOG_LEVEL_NAMES[*self as usize]) + } +} + +impl LevelFilter { + fn from_usize(u: usize) -> Option { + match u { + 0 => Some(LevelFilter::Off), + 1 => Some(LevelFilter::Error), + 2 => Some(LevelFilter::Warn), + 3 => Some(LevelFilter::Info), + 4 => Some(LevelFilter::Debug), + 5 => Some(LevelFilter::Trace), + _ => None, + } + } + /// Returns the most verbose logging level filter. + #[inline] + pub fn max() -> LevelFilter { + LevelFilter::Trace + } + + /// Converts `self` to the equivalent `Level`. + /// + /// Returns `None` if `self` is `LevelFilter::Off`. + #[inline] + pub fn to_level(&self) -> Option { + Level::from_usize(*self as usize) + } +} + +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +enum MaybeStaticStr<'a> { + Static(&'static str), + Borrowed(&'a str), +} + +impl<'a> MaybeStaticStr<'a> { + #[inline] + fn get(&self) -> &'a str { + match *self { + MaybeStaticStr::Static(s) => s, + MaybeStaticStr::Borrowed(s) => s, + } + } +} + +/// The "payload" of a log message. +/// +/// # Use +/// +/// `Record` structures are passed as parameters to the [`log`][method.log] +/// method of the [`Log`] trait. Logger implementors manipulate these +/// structures in order to display log messages. `Record`s are automatically +/// created by the [`log!`] macro and so are not seen by log users. 
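Because `Level` and `LevelFilter` share `usize` discriminants (the field-less-enum trick noted above), the cross-type `PartialEq`/`PartialOrd` impls let a level be compared directly against a filter. For instance:

```rust
use log::{Level, LevelFilter};

fn main() {
    let max = LevelFilter::Info;
    assert!(Level::Error <= max); // Error (1) <= Info (3): would be logged
    assert!(Level::Debug > max);  // Debug (4) >  Info (3): filtered out

    assert_eq!(Level::Info.to_level_filter(), LevelFilter::Info);
    assert_eq!(LevelFilter::Off.to_level(), None);
}
```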
+/// +/// Note that the [`level()`] and [`target()`] accessors are equivalent to +/// `self.metadata().level()` and `self.metadata().target()` respectively. +/// These methods are provided as a convenience for users of this structure. +/// +/// # Example +/// +/// The following example shows a simple logger that displays the level, +/// module path, and message of any `Record` that is passed to it. +/// +/// ```edition2018 +/// struct SimpleLogger; +/// +/// impl log::Log for SimpleLogger { +/// fn enabled(&self, metadata: &log::Metadata) -> bool { +/// true +/// } +/// +/// fn log(&self, record: &log::Record) { +/// if !self.enabled(record.metadata()) { +/// return; +/// } +/// +/// println!("{}:{} -- {}", +/// record.level(), +/// record.target(), +/// record.args()); +/// } +/// fn flush(&self) {} +/// } +/// ``` +/// +/// [method.log]: trait.Log.html#tymethod.log +/// [`Log`]: trait.Log.html +/// [`log!`]: macro.log.html +/// [`level()`]: struct.Record.html#method.level +/// [`target()`]: struct.Record.html#method.target +#[derive(Clone, Debug)] +pub struct Record<'a> { + metadata: Metadata<'a>, + args: fmt::Arguments<'a>, + module_path: Option>, + file: Option>, + line: Option, + #[cfg(feature = "kv_unstable")] + key_values: KeyValues<'a>, +} + +// This wrapper type is only needed so we can +// `#[derive(Debug)]` on `Record`. It also +// provides a useful `Debug` implementation for +// the underlying `Source`. +#[cfg(feature = "kv_unstable")] +#[derive(Clone)] +struct KeyValues<'a>(&'a dyn kv::Source); + +#[cfg(feature = "kv_unstable")] +impl<'a> fmt::Debug for KeyValues<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut visitor = f.debug_map(); + self.0.visit(&mut visitor).map_err(|_| fmt::Error)?; + visitor.finish() + } +} + +impl<'a> Record<'a> { + /// Returns a new builder. + #[inline] + pub fn builder() -> RecordBuilder<'a> { + RecordBuilder::new() + } + + /// The message body. + #[inline] + pub fn args(&self) -> &fmt::Arguments<'a> { + &self.args + } + + /// Metadata about the log directive. + #[inline] + pub fn metadata(&self) -> &Metadata<'a> { + &self.metadata + } + + /// The verbosity level of the message. + #[inline] + pub fn level(&self) -> Level { + self.metadata.level() + } + + /// The name of the target of the directive. + #[inline] + pub fn target(&self) -> &'a str { + self.metadata.target() + } + + /// The module path of the message. + #[inline] + pub fn module_path(&self) -> Option<&'a str> { + self.module_path.map(|s| s.get()) + } + + /// The module path of the message, if it is a `'static` string. + #[inline] + pub fn module_path_static(&self) -> Option<&'static str> { + match self.module_path { + Some(MaybeStaticStr::Static(s)) => Some(s), + _ => None, + } + } + + /// The source file containing the message. + #[inline] + pub fn file(&self) -> Option<&'a str> { + self.file.map(|s| s.get()) + } + + /// The module path of the message, if it is a `'static` string. + #[inline] + pub fn file_static(&self) -> Option<&'static str> { + match self.file { + Some(MaybeStaticStr::Static(s)) => Some(s), + _ => None, + } + } + + /// The line containing the message. + #[inline] + pub fn line(&self) -> Option { + self.line + } + + /// The structued key-value pairs associated with the message. + #[cfg(feature = "kv_unstable")] + #[inline] + pub fn key_values(&self) -> &dyn kv::Source { + self.key_values.0 + } + + /// Create a new [`RecordBuilder`](struct.RecordBuilder.html) based on this record. 
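// --- A small editorial sketch of the Record accessors above, assuming the
// --- `log` 0.4 crate. The borrowed setters (module_path, file) and the
// --- `_static` setters feed different MaybeStaticStr variants, so the
// --- `_static` getters only return Some when the `_static` setter was used.
use log::{Level, Record};

fn main() {
    // Argument-less format_args! is usable outside the builder chain,
    // mirroring the crate's own tests.
    let args = format_args!("sketch");
    let record = Record::builder()
        .args(args)
        .level(Level::Info)
        .target("example")
        .module_path(Some("example::module")) // stored as a borrowed string
        .file_static(Some("lib.rs"))          // stored as a 'static string
        .line(Some(42))
        .build();

    assert_eq!(record.level(), Level::Info);
    assert_eq!(record.target(), "example");
    assert_eq!(record.module_path(), Some("example::module"));
    assert_eq!(record.module_path_static(), None); // set with the borrowed variant
    assert_eq!(record.file_static(), Some("lib.rs"));
    assert_eq!(record.line(), Some(42));
}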
+ #[cfg(feature = "kv_unstable")] + #[inline] + pub fn to_builder(&self) -> RecordBuilder { + RecordBuilder { + record: Record { + metadata: Metadata { + level: self.metadata.level, + target: self.metadata.target, + }, + args: self.args, + module_path: self.module_path, + file: self.file, + line: self.line, + key_values: self.key_values.clone(), + }, + } + } +} + +/// Builder for [`Record`](struct.Record.html). +/// +/// Typically should only be used by log library creators or for testing and "shim loggers". +/// The `RecordBuilder` can set the different parameters of `Record` object, and returns +/// the created object when `build` is called. +/// +/// # Examples +/// +/// +/// ```edition2018 +/// use log::{Level, Record}; +/// +/// let record = Record::builder() +/// .args(format_args!("Error!")) +/// .level(Level::Error) +/// .target("myApp") +/// .file(Some("server.rs")) +/// .line(Some(144)) +/// .module_path(Some("server")) +/// .build(); +/// ``` +/// +/// Alternatively, use [`MetadataBuilder`](struct.MetadataBuilder.html): +/// +/// ```edition2018 +/// use log::{Record, Level, MetadataBuilder}; +/// +/// let error_metadata = MetadataBuilder::new() +/// .target("myApp") +/// .level(Level::Error) +/// .build(); +/// +/// let record = Record::builder() +/// .metadata(error_metadata) +/// .args(format_args!("Error!")) +/// .line(Some(433)) +/// .file(Some("app.rs")) +/// .module_path(Some("server")) +/// .build(); +/// ``` +#[derive(Debug)] +pub struct RecordBuilder<'a> { + record: Record<'a>, +} + +impl<'a> RecordBuilder<'a> { + /// Construct new `RecordBuilder`. + /// + /// The default options are: + /// + /// - `args`: [`format_args!("")`] + /// - `metadata`: [`Metadata::builder().build()`] + /// - `module_path`: `None` + /// - `file`: `None` + /// - `line`: `None` + /// + /// [`format_args!("")`]: https://doc.rust-lang.org/std/macro.format_args.html + /// [`Metadata::builder().build()`]: struct.MetadataBuilder.html#method.build + #[inline] + pub fn new() -> RecordBuilder<'a> { + RecordBuilder { + record: Record { + args: format_args!(""), + metadata: Metadata::builder().build(), + module_path: None, + file: None, + line: None, + #[cfg(feature = "kv_unstable")] + key_values: KeyValues(&Option::None::<(kv::Key, kv::Value)>), + }, + } + } + + /// Set [`args`](struct.Record.html#method.args). + #[inline] + pub fn args(&mut self, args: fmt::Arguments<'a>) -> &mut RecordBuilder<'a> { + self.record.args = args; + self + } + + /// Set [`metadata`](struct.Record.html#method.metadata). Construct a `Metadata` object with [`MetadataBuilder`](struct.MetadataBuilder.html). + #[inline] + pub fn metadata(&mut self, metadata: Metadata<'a>) -> &mut RecordBuilder<'a> { + self.record.metadata = metadata; + self + } + + /// Set [`Metadata::level`](struct.Metadata.html#method.level). 
+ #[inline] + pub fn level(&mut self, level: Level) -> &mut RecordBuilder<'a> { + self.record.metadata.level = level; + self + } + + /// Set [`Metadata::target`](struct.Metadata.html#method.target) + #[inline] + pub fn target(&mut self, target: &'a str) -> &mut RecordBuilder<'a> { + self.record.metadata.target = target; + self + } + + /// Set [`module_path`](struct.Record.html#method.module_path) + #[inline] + pub fn module_path(&mut self, path: Option<&'a str>) -> &mut RecordBuilder<'a> { + self.record.module_path = path.map(MaybeStaticStr::Borrowed); + self + } + + /// Set [`module_path`](struct.Record.html#method.module_path) to a `'static` string + #[inline] + pub fn module_path_static(&mut self, path: Option<&'static str>) -> &mut RecordBuilder<'a> { + self.record.module_path = path.map(MaybeStaticStr::Static); + self + } + + /// Set [`file`](struct.Record.html#method.file) + #[inline] + pub fn file(&mut self, file: Option<&'a str>) -> &mut RecordBuilder<'a> { + self.record.file = file.map(MaybeStaticStr::Borrowed); + self + } + + /// Set [`file`](struct.Record.html#method.file) to a `'static` string. + #[inline] + pub fn file_static(&mut self, file: Option<&'static str>) -> &mut RecordBuilder<'a> { + self.record.file = file.map(MaybeStaticStr::Static); + self + } + + /// Set [`line`](struct.Record.html#method.line) + #[inline] + pub fn line(&mut self, line: Option) -> &mut RecordBuilder<'a> { + self.record.line = line; + self + } + + /// Set [`key_values`](struct.Record.html#method.key_values) + #[cfg(feature = "kv_unstable")] + #[inline] + pub fn key_values(&mut self, kvs: &'a dyn kv::Source) -> &mut RecordBuilder<'a> { + self.record.key_values = KeyValues(kvs); + self + } + + /// Invoke the builder and return a `Record` + #[inline] + pub fn build(&self) -> Record<'a> { + self.record.clone() + } +} + +/// Metadata about a log message. +/// +/// # Use +/// +/// `Metadata` structs are created when users of the library use +/// logging macros. +/// +/// They are consumed by implementations of the `Log` trait in the +/// `enabled` method. +/// +/// `Record`s use `Metadata` to determine the log message's severity +/// and target. +/// +/// Users should use the `log_enabled!` macro in their code to avoid +/// constructing expensive log messages. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::{Record, Level, Metadata}; +/// +/// struct MyLogger; +/// +/// impl log::Log for MyLogger { +/// fn enabled(&self, metadata: &Metadata) -> bool { +/// metadata.level() <= Level::Info +/// } +/// +/// fn log(&self, record: &Record) { +/// if self.enabled(record.metadata()) { +/// println!("{} - {}", record.level(), record.args()); +/// } +/// } +/// fn flush(&self) {} +/// } +/// +/// # fn main(){} +/// ``` +#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +pub struct Metadata<'a> { + level: Level, + target: &'a str, +} + +impl<'a> Metadata<'a> { + /// Returns a new builder. + #[inline] + pub fn builder() -> MetadataBuilder<'a> { + MetadataBuilder::new() + } + + /// The verbosity level of the message. + #[inline] + pub fn level(&self) -> Level { + self.level + } + + /// The name of the target of the directive. + #[inline] + pub fn target(&self) -> &'a str { + self.target + } +} + +/// Builder for [`Metadata`](struct.Metadata.html). +/// +/// Typically should only be used by log library creators or for testing and "shim loggers". +/// The `MetadataBuilder` can set the different parameters of a `Metadata` object, and returns +/// the created object when `build` is called. 
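// --- Editorial sketch, assuming the `log` 0.4 crate: the `Metadata` built
// --- here is exactly what `Log::enabled` receives, so a caller can pre-check
// --- whether a target/level combination would be logged before doing
// --- expensive work (the `log_enabled!` macro wraps the same idea).
use log::{Level, Metadata};

fn main() {
    let metadata = Metadata::builder()
        .level(Level::Debug)
        .target("example::db")
        .build();

    assert_eq!(metadata.level(), Level::Debug);
    assert_eq!(metadata.target(), "example::db");

    // With no logger installed, the fallback no-op logger reports everything
    // as disabled.
    assert!(!log::logger().enabled(&metadata));
}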
+/// +/// # Example +/// +/// ```edition2018 +/// let target = "myApp"; +/// use log::{Level, MetadataBuilder}; +/// let metadata = MetadataBuilder::new() +/// .level(Level::Debug) +/// .target(target) +/// .build(); +/// ``` +#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +pub struct MetadataBuilder<'a> { + metadata: Metadata<'a>, +} + +impl<'a> MetadataBuilder<'a> { + /// Construct a new `MetadataBuilder`. + /// + /// The default options are: + /// + /// - `level`: `Level::Info` + /// - `target`: `""` + #[inline] + pub fn new() -> MetadataBuilder<'a> { + MetadataBuilder { + metadata: Metadata { + level: Level::Info, + target: "", + }, + } + } + + /// Setter for [`level`](struct.Metadata.html#method.level). + #[inline] + pub fn level(&mut self, arg: Level) -> &mut MetadataBuilder<'a> { + self.metadata.level = arg; + self + } + + /// Setter for [`target`](struct.Metadata.html#method.target). + #[inline] + pub fn target(&mut self, target: &'a str) -> &mut MetadataBuilder<'a> { + self.metadata.target = target; + self + } + + /// Returns a `Metadata` object. + #[inline] + pub fn build(&self) -> Metadata<'a> { + self.metadata.clone() + } +} + +/// A trait encapsulating the operations required of a logger. +pub trait Log: Sync + Send { + /// Determines if a log message with the specified metadata would be + /// logged. + /// + /// This is used by the `log_enabled!` macro to allow callers to avoid + /// expensive computation of log message arguments if the message would be + /// discarded anyway. + fn enabled(&self, metadata: &Metadata) -> bool; + + /// Logs the `Record`. + /// + /// Note that `enabled` is *not* necessarily called before this method. + /// Implementations of `log` should perform all necessary filtering + /// internally. + fn log(&self, record: &Record); + + /// Flushes any buffered records. + fn flush(&self); +} + +// Just used as a dummy initial value for LOGGER +struct NopLogger; + +impl Log for NopLogger { + fn enabled(&self, _: &Metadata) -> bool { + false + } + + fn log(&self, _: &Record) {} + fn flush(&self) {} +} + +/// Sets the global maximum log level. +/// +/// Generally, this should only be called by the active logging implementation. +#[inline] +pub fn set_max_level(level: LevelFilter) { + MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst) +} + +/// Returns the current maximum log level. +/// +/// The [`log!`], [`error!`], [`warn!`], [`info!`], [`debug!`], and [`trace!`] macros check +/// this value and discard any message logged at a higher level. The maximum +/// log level is set by the [`set_max_level`] function. +/// +/// [`log!`]: macro.log.html +/// [`error!`]: macro.error.html +/// [`warn!`]: macro.warn.html +/// [`info!`]: macro.info.html +/// [`debug!`]: macro.debug.html +/// [`trace!`]: macro.trace.html +/// [`set_max_level`]: fn.set_max_level.html +#[inline(always)] +pub fn max_level() -> LevelFilter { + // Since `LevelFilter` is `repr(usize)`, + // this transmute is sound if and only if `MAX_LOG_LEVEL_FILTER` + // is set to a usize that is a valid discriminant for `LevelFilter`. + // Since `MAX_LOG_LEVEL_FILTER` is private, the only time it's set + // is by `set_max_level` above, i.e. by casting a `LevelFilter` to `usize`. + // So any usize stored in `MAX_LOG_LEVEL_FILTER` is a valid discriminant. + unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) } +} + +/// Sets the global logger to a `Box`. +/// +/// This is a simple convenience wrapper over `set_logger`, which takes a +/// `Box` rather than a `&'static Log`. 
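// --- Editorial sketch of the runtime filter, assuming the `log` 0.4 crate
// --- with none of the max_level_* features enabled: the logging macros gate
// --- on both STATIC_MAX_LEVEL and the value set by set_max_level, so changing
// --- the filter takes effect immediately.
use log::{Level, LevelFilter};

fn main() {
    log::set_max_level(LevelFilter::Warn);
    assert_eq!(log::max_level(), LevelFilter::Warn);

    // The same check the log!/error!/…! macros perform before calling the
    // installed logger.
    let would_log =
        |level: Level| level <= log::STATIC_MAX_LEVEL && level <= log::max_level();
    assert!(would_log(Level::Error));
    assert!(!would_log(Level::Info));
}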
See the documentation for +/// [`set_logger`] for more details. +/// +/// Requires the `std` feature. +/// +/// # Errors +/// +/// An error is returned if a logger has already been set. +/// +/// [`set_logger`]: fn.set_logger.html +#[cfg(all(feature = "std", atomic_cas))] +pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { + set_logger_inner(|| Box::leak(logger)) +} + +/// Sets the global logger to a `&'static Log`. +/// +/// This function may only be called once in the lifetime of a program. Any log +/// events that occur before the call to `set_logger` completes will be ignored. +/// +/// This function does not typically need to be called manually. Logger +/// implementations should provide an initialization method that installs the +/// logger internally. +/// +/// # Availability +/// +/// This method is available even when the `std` feature is disabled. However, +/// it is currently unavailable on `thumbv6` targets, which lack support for +/// some atomic operations which are used by this function. Even on those +/// targets, [`set_logger_racy`] will be available. +/// +/// # Errors +/// +/// An error is returned if a logger has already been set. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::{error, info, warn, Record, Level, Metadata, LevelFilter}; +/// +/// static MY_LOGGER: MyLogger = MyLogger; +/// +/// struct MyLogger; +/// +/// impl log::Log for MyLogger { +/// fn enabled(&self, metadata: &Metadata) -> bool { +/// metadata.level() <= Level::Info +/// } +/// +/// fn log(&self, record: &Record) { +/// if self.enabled(record.metadata()) { +/// println!("{} - {}", record.level(), record.args()); +/// } +/// } +/// fn flush(&self) {} +/// } +/// +/// # fn main(){ +/// log::set_logger(&MY_LOGGER).unwrap(); +/// log::set_max_level(LevelFilter::Info); +/// +/// info!("hello log"); +/// warn!("warning"); +/// error!("oops"); +/// # } +/// ``` +/// +/// [`set_logger_racy`]: fn.set_logger_racy.html +#[cfg(atomic_cas)] +pub fn set_logger(logger: &'static dyn Log) -> Result<(), SetLoggerError> { + set_logger_inner(|| logger) +} + +#[cfg(atomic_cas)] +fn set_logger_inner(make_logger: F) -> Result<(), SetLoggerError> +where + F: FnOnce() -> &'static dyn Log, +{ + match STATE.compare_and_swap(UNINITIALIZED, INITIALIZING, Ordering::SeqCst) { + UNINITIALIZED => { + unsafe { + LOGGER = make_logger(); + } + STATE.store(INITIALIZED, Ordering::SeqCst); + Ok(()) + } + INITIALIZING => { + while STATE.load(Ordering::SeqCst) == INITIALIZING { + std::sync::atomic::spin_loop_hint(); + } + Err(SetLoggerError(())) + } + _ => Err(SetLoggerError(())), + } +} + +/// A thread-unsafe version of [`set_logger`]. +/// +/// This function is available on all platforms, even those that do not have +/// support for atomics that is needed by [`set_logger`]. +/// +/// In almost all cases, [`set_logger`] should be preferred. +/// +/// # Safety +/// +/// This function is only safe to call when no other logger initialization +/// function is called while this function still executes. +/// +/// This can be upheld by (for example) making sure that **there are no other +/// threads**, and (on embedded) that **interrupts are disabled**. +/// +/// It is safe to use other logging functions while this function runs +/// (including all logging macros). 
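// --- Editorial sketch of installing a heap-allocated logger with
// --- set_boxed_logger, assuming the `log` crate is built with its `std`
// --- feature; `set_logger` (documented above) is the &'static, no-std
// --- alternative. The `PrefixLogger` type here is purely illustrative.
use log::{info, LevelFilter, Log, Metadata, Record};

struct PrefixLogger(String);

impl Log for PrefixLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= log::max_level()
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            println!("[{}] {} - {}", self.0, record.level(), record.args());
        }
    }

    fn flush(&self) {}
}

fn main() -> Result<(), log::SetLoggerError> {
    log::set_boxed_logger(Box::new(PrefixLogger("app".to_string())))?;
    log::set_max_level(LevelFilter::Info);
    info!("logger installed");
    Ok(())
}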
+/// +/// [`set_logger`]: fn.set_logger.html +pub unsafe fn set_logger_racy(logger: &'static dyn Log) -> Result<(), SetLoggerError> { + match STATE.load(Ordering::SeqCst) { + UNINITIALIZED => { + LOGGER = logger; + STATE.store(INITIALIZED, Ordering::SeqCst); + Ok(()) + } + INITIALIZING => { + // This is just plain UB, since we were racing another initialization function + unreachable!("set_logger_racy must not be used with other initialization functions") + } + _ => Err(SetLoggerError(())), + } +} + +/// The type returned by [`set_logger`] if [`set_logger`] has already been called. +/// +/// [`set_logger`]: fn.set_logger.html +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub struct SetLoggerError(()); + +impl fmt::Display for SetLoggerError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(SET_LOGGER_ERROR) + } +} + +// The Error trait is not available in libcore +#[cfg(feature = "std")] +impl error::Error for SetLoggerError {} + +/// The type returned by [`from_str`] when the string doesn't match any of the log levels. +/// +/// [`from_str`]: https://doc.rust-lang.org/std/str/trait.FromStr.html#tymethod.from_str +#[allow(missing_copy_implementations)] +#[derive(Debug, PartialEq)] +pub struct ParseLevelError(()); + +impl fmt::Display for ParseLevelError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(LEVEL_PARSE_ERROR) + } +} + +// The Error trait is not available in libcore +#[cfg(feature = "std")] +impl error::Error for ParseLevelError {} + +/// Returns a reference to the logger. +/// +/// If a logger has not been set, a no-op implementation is returned. +pub fn logger() -> &'static dyn Log { + if STATE.load(Ordering::SeqCst) != INITIALIZED { + static NOP: NopLogger = NopLogger; + &NOP + } else { + unsafe { LOGGER } + } +} + +// WARNING: this is not part of the crate's public API and is subject to change at any time +#[doc(hidden)] +pub fn __private_api_log( + args: fmt::Arguments, + level: Level, + &(target, module_path, file, line): &(&str, &'static str, &'static str, u32), +) { + logger().log( + &Record::builder() + .args(args) + .level(level) + .target(target) + .module_path_static(Some(module_path)) + .file_static(Some(file)) + .line(Some(line)) + .build(), + ); +} + +// WARNING: this is not part of the crate's public API and is subject to change at any time +#[doc(hidden)] +pub fn __private_api_log_lit( + message: &str, + level: Level, + &(target, module_path, file, line): &(&str, &'static str, &'static str, u32), +) { + logger().log( + &Record::builder() + .args(format_args!("{}", message)) + .level(level) + .target(target) + .module_path_static(Some(module_path)) + .file_static(Some(file)) + .line(Some(line)) + .build(), + ); +} + +// WARNING: this is not part of the crate's public API and is subject to change at any time +#[doc(hidden)] +pub fn __private_api_enabled(level: Level, target: &str) -> bool { + logger().enabled(&Metadata::builder().level(level).target(target).build()) +} + +/// The statically resolved maximum log level. +/// +/// See the crate level documentation for information on how to configure this. +/// +/// This value is checked by the log macros, but not by the `Log`ger returned by +/// the [`logger`] function. Code that manually calls functions on that value +/// should compare the level against this value. +/// +/// [`logger`]: fn.logger.html +pub const STATIC_MAX_LEVEL: LevelFilter = MAX_LEVEL_INNER; + +cfg_if! 
{ + if #[cfg(all(not(debug_assertions), feature = "release_max_level_off"))] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Off; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_error"))] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Error; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_warn"))] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Warn; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_info"))] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Info; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_debug"))] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Debug; + } else if #[cfg(all(not(debug_assertions), feature = "release_max_level_trace"))] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Trace; + } else if #[cfg(feature = "max_level_off")] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Off; + } else if #[cfg(feature = "max_level_error")] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Error; + } else if #[cfg(feature = "max_level_warn")] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Warn; + } else if #[cfg(feature = "max_level_info")] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Info; + } else if #[cfg(feature = "max_level_debug")] { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Debug; + } else { + const MAX_LEVEL_INNER: LevelFilter = LevelFilter::Trace; + } +} + +#[cfg(test)] +mod tests { + extern crate std; + use super::{Level, LevelFilter, ParseLevelError}; + use tests::std::string::ToString; + + #[test] + fn test_levelfilter_from_str() { + let tests = [ + ("off", Ok(LevelFilter::Off)), + ("error", Ok(LevelFilter::Error)), + ("warn", Ok(LevelFilter::Warn)), + ("info", Ok(LevelFilter::Info)), + ("debug", Ok(LevelFilter::Debug)), + ("trace", Ok(LevelFilter::Trace)), + ("OFF", Ok(LevelFilter::Off)), + ("ERROR", Ok(LevelFilter::Error)), + ("WARN", Ok(LevelFilter::Warn)), + ("INFO", Ok(LevelFilter::Info)), + ("DEBUG", Ok(LevelFilter::Debug)), + ("TRACE", Ok(LevelFilter::Trace)), + ("asdf", Err(ParseLevelError(()))), + ]; + for &(s, ref expected) in &tests { + assert_eq!(expected, &s.parse()); + } + } + + #[test] + fn test_level_from_str() { + let tests = [ + ("OFF", Err(ParseLevelError(()))), + ("error", Ok(Level::Error)), + ("warn", Ok(Level::Warn)), + ("info", Ok(Level::Info)), + ("debug", Ok(Level::Debug)), + ("trace", Ok(Level::Trace)), + ("ERROR", Ok(Level::Error)), + ("WARN", Ok(Level::Warn)), + ("INFO", Ok(Level::Info)), + ("DEBUG", Ok(Level::Debug)), + ("TRACE", Ok(Level::Trace)), + ("asdf", Err(ParseLevelError(()))), + ]; + for &(s, ref expected) in &tests { + assert_eq!(expected, &s.parse()); + } + } + + #[test] + fn test_level_show() { + assert_eq!("INFO", Level::Info.to_string()); + assert_eq!("ERROR", Level::Error.to_string()); + } + + #[test] + fn test_levelfilter_show() { + assert_eq!("OFF", LevelFilter::Off.to_string()); + assert_eq!("ERROR", LevelFilter::Error.to_string()); + } + + #[test] + fn test_cross_cmp() { + assert!(Level::Debug > LevelFilter::Error); + assert!(LevelFilter::Warn < Level::Trace); + assert!(LevelFilter::Off < Level::Error); + } + + #[test] + fn test_cross_eq() { + assert!(Level::Error == LevelFilter::Error); + assert!(LevelFilter::Off != Level::Error); + assert!(Level::Trace == LevelFilter::Trace); + } + + #[test] + fn test_to_level() { + assert_eq!(Some(Level::Error), LevelFilter::Error.to_level()); + assert_eq!(None, LevelFilter::Off.to_level()); + 
assert_eq!(Some(Level::Debug), LevelFilter::Debug.to_level()); + } + + #[test] + fn test_to_level_filter() { + assert_eq!(LevelFilter::Error, Level::Error.to_level_filter()); + assert_eq!(LevelFilter::Trace, Level::Trace.to_level_filter()); + } + + #[test] + #[cfg(feature = "std")] + fn test_error_trait() { + use super::SetLoggerError; + let e = SetLoggerError(()); + assert_eq!( + &e.to_string(), + "attempted to set a logger after the logging system \ + was already initialized" + ); + } + + #[test] + fn test_metadata_builder() { + use super::MetadataBuilder; + let target = "myApp"; + let metadata_test = MetadataBuilder::new() + .level(Level::Debug) + .target(target) + .build(); + assert_eq!(metadata_test.level(), Level::Debug); + assert_eq!(metadata_test.target(), "myApp"); + } + + #[test] + fn test_metadata_convenience_builder() { + use super::Metadata; + let target = "myApp"; + let metadata_test = Metadata::builder() + .level(Level::Debug) + .target(target) + .build(); + assert_eq!(metadata_test.level(), Level::Debug); + assert_eq!(metadata_test.target(), "myApp"); + } + + #[test] + fn test_record_builder() { + use super::{MetadataBuilder, RecordBuilder}; + let target = "myApp"; + let metadata = MetadataBuilder::new().target(target).build(); + let fmt_args = format_args!("hello"); + let record_test = RecordBuilder::new() + .args(fmt_args) + .metadata(metadata) + .module_path(Some("foo")) + .file(Some("bar")) + .line(Some(30)) + .build(); + assert_eq!(record_test.metadata().target(), "myApp"); + assert_eq!(record_test.module_path(), Some("foo")); + assert_eq!(record_test.file(), Some("bar")); + assert_eq!(record_test.line(), Some(30)); + } + + #[test] + fn test_record_convenience_builder() { + use super::{Metadata, Record}; + let target = "myApp"; + let metadata = Metadata::builder().target(target).build(); + let fmt_args = format_args!("hello"); + let record_test = Record::builder() + .args(fmt_args) + .metadata(metadata) + .module_path(Some("foo")) + .file(Some("bar")) + .line(Some(30)) + .build(); + assert_eq!(record_test.target(), "myApp"); + assert_eq!(record_test.module_path(), Some("foo")); + assert_eq!(record_test.file(), Some("bar")); + assert_eq!(record_test.line(), Some(30)); + } + + #[test] + fn test_record_complete_builder() { + use super::{Level, Record}; + let target = "myApp"; + let record_test = Record::builder() + .module_path(Some("foo")) + .file(Some("bar")) + .line(Some(30)) + .target(target) + .level(Level::Error) + .build(); + assert_eq!(record_test.target(), "myApp"); + assert_eq!(record_test.level(), Level::Error); + assert_eq!(record_test.module_path(), Some("foo")); + assert_eq!(record_test.file(), Some("bar")); + assert_eq!(record_test.line(), Some(30)); + } + + #[test] + #[cfg(feature = "kv_unstable")] + fn test_record_key_values_builder() { + use super::Record; + use kv::{self, Visitor}; + + struct TestVisitor { + seen_pairs: usize, + } + + impl<'kvs> Visitor<'kvs> for TestVisitor { + fn visit_pair( + &mut self, + _: kv::Key<'kvs>, + _: kv::Value<'kvs>, + ) -> Result<(), kv::Error> { + self.seen_pairs += 1; + Ok(()) + } + } + + let kvs: &[(&str, i32)] = &[("a", 1), ("b", 2)]; + let record_test = Record::builder().key_values(&kvs).build(); + + let mut visitor = TestVisitor { seen_pairs: 0 }; + + record_test.key_values().visit(&mut visitor).unwrap(); + + assert_eq!(2, visitor.seen_pairs); + } + + #[test] + #[cfg(feature = "kv_unstable")] + fn test_record_key_values_get_coerce() { + use super::Record; + + let kvs: &[(&str, &str)] = &[("a", "1"), ("b", "2")]; + 
let record = Record::builder().key_values(&kvs).build(); + + assert_eq!( + "2", + record + .key_values() + .get("b".into()) + .expect("missing key") + .to_borrowed_str() + .expect("invalid value") + ); + } +} diff -Nru cargo-0.44.1/vendor/log/src/macros.rs cargo-0.47.0/vendor/log/src/macros.rs --- cargo-0.44.1/vendor/log/src/macros.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/macros.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,250 +1,262 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// The standard logging macro. -/// -/// This macro will generically log with the specified `Level` and `format!` -/// based argument list. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::{log, Level}; -/// -/// # fn main() { -/// let data = (42, "Forty-two"); -/// let private_data = "private"; -/// -/// log!(Level::Error, "Received errors: {}, {}", data.0, data.1); -/// log!(target: "app_events", Level::Warn, "App warning: {}, {}, {}", -/// data.0, data.1, private_data); -/// # } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! log { - (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ - let lvl = $lvl; - if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { - $crate::__private_api_log( - __log_format_args!($($arg)+), - lvl, - &($target, __log_module_path!(), __log_file!(), __log_line!()), - ); - } - }); - ($lvl:expr, $($arg:tt)+) => (log!(target: __log_module_path!(), $lvl, $($arg)+)) -} - -/// Logs a message at the error level. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::error; -/// -/// # fn main() { -/// let (err_info, port) = ("No connection", 22); -/// -/// error!("Error: {} on port {}", err_info, port); -/// error!(target: "app_events", "App Error: {}, Port: {}", err_info, 22); -/// # } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! error { - (target: $target:expr, $($arg:tt)+) => ( - log!(target: $target, $crate::Level::Error, $($arg)+); - ); - ($($arg:tt)+) => ( - log!($crate::Level::Error, $($arg)+); - ) -} - -/// Logs a message at the warn level. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::warn; -/// -/// # fn main() { -/// let warn_description = "Invalid Input"; -/// -/// warn!("Warning! {}!", warn_description); -/// warn!(target: "input_events", "App received warning: {}", warn_description); -/// # } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! warn { - (target: $target:expr, $($arg:tt)+) => ( - log!(target: $target, $crate::Level::Warn, $($arg)+); - ); - ($($arg:tt)+) => ( - log!($crate::Level::Warn, $($arg)+); - ) -} - -/// Logs a message at the info level. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::info; -/// -/// # fn main() { -/// # struct Connection { port: u32, speed: f32 } -/// let conn_info = Connection { port: 40, speed: 3.20 }; -/// -/// info!("Connected to port {} at {} Mb/s", conn_info.port, conn_info.speed); -/// info!(target: "connection_events", "Successfull connection, port: {}, speed: {}", -/// conn_info.port, conn_info.speed); -/// # } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! 
info { - (target: $target:expr, $($arg:tt)+) => ( - log!(target: $target, $crate::Level::Info, $($arg)+); - ); - ($($arg:tt)+) => ( - log!($crate::Level::Info, $($arg)+); - ) -} - -/// Logs a message at the debug level. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::debug; -/// -/// # fn main() { -/// # struct Position { x: f32, y: f32 } -/// let pos = Position { x: 3.234, y: -1.223 }; -/// -/// debug!("New position: x: {}, y: {}", pos.x, pos.y); -/// debug!(target: "app_events", "New position: x: {}, y: {}", pos.x, pos.y); -/// # } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => ( - log!(target: $target, $crate::Level::Debug, $($arg)+); - ); - ($($arg:tt)+) => ( - log!($crate::Level::Debug, $($arg)+); - ) -} - -/// Logs a message at the trace level. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::trace; -/// -/// # fn main() { -/// # struct Position { x: f32, y: f32 } -/// let pos = Position { x: 3.234, y: -1.223 }; -/// -/// trace!("Position is: x: {}, y: {}", pos.x, pos.y); -/// trace!(target: "app_events", "x is {} and y is {}", -/// if pos.x >= 0.0 { "positive" } else { "negative" }, -/// if pos.y >= 0.0 { "positive" } else { "negative" }); -/// # } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! trace { - (target: $target:expr, $($arg:tt)+) => ( - log!(target: $target, $crate::Level::Trace, $($arg)+); - ); - ($($arg:tt)+) => ( - log!($crate::Level::Trace, $($arg)+); - ) -} - -/// Determines if a message logged at the specified level in that module will -/// be logged. -/// -/// This can be used to avoid expensive computation of log message arguments if -/// the message would be ignored anyway. -/// -/// # Examples -/// -/// ```edition2018 -/// use log::Level::Debug; -/// use log::{debug, log_enabled}; -/// -/// # fn foo() { -/// if log_enabled!(Debug) { -/// let data = expensive_call(); -/// debug!("expensive debug data: {} {}", data.x, data.y); -/// } -/// if log_enabled!(target: "Global", Debug) { -/// let data = expensive_call(); -/// debug!(target: "Global", "expensive debug data: {} {}", data.x, data.y); -/// } -/// # } -/// # struct Data { x: u32, y: u32 } -/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } -/// # fn main() {} -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! log_enabled { - (target: $target:expr, $lvl:expr) => {{ - let lvl = $lvl; - lvl <= $crate::STATIC_MAX_LEVEL - && lvl <= $crate::max_level() - && $crate::__private_api_enabled(lvl, $target) - }}; - ($lvl:expr) => { - log_enabled!(target: __log_module_path!(), $lvl) - }; -} - -// The log macro above cannot invoke format_args directly because it uses -// local_inner_macros. A format_args invocation there would resolve to -// $crate::format_args which does not exist. Instead invoke format_args here -// outside of local_inner_macros so that it resolves (probably) to -// core::format_args or std::format_args. Same for the several macros that -// follow. -// -// This is a workaround until we drop support for pre-1.30 compilers. At that -// point we can remove use of local_inner_macros, use $crate:: when invoking -// local macros, and invoke format_args directly. -#[doc(hidden)] -#[macro_export] -macro_rules! __log_format_args { - ($($args:tt)*) => { - format_args!($($args)*) - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __log_module_path { - () => { - module_path!() - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! 
__log_file { - () => { - file!() - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __log_line { - () => { - line!() - }; -} +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The standard logging macro. +/// +/// This macro will generically log with the specified `Level` and `format!` +/// based argument list. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::{log, Level}; +/// +/// # fn main() { +/// let data = (42, "Forty-two"); +/// let private_data = "private"; +/// +/// log!(Level::Error, "Received errors: {}, {}", data.0, data.1); +/// log!(target: "app_events", Level::Warn, "App warning: {}, {}, {}", +/// data.0, data.1, private_data); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! log { + (target: $target:expr, $lvl:expr, $message:expr) => ({ + let lvl = $lvl; + if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { + // ensure that $message is a valid format string literal + let _ = __log_format_args!($message); + $crate::__private_api_log_lit( + $message, + lvl, + &($target, __log_module_path!(), __log_file!(), __log_line!()), + ); + } + }); + (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ + let lvl = $lvl; + if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { + $crate::__private_api_log( + __log_format_args!($($arg)+), + lvl, + &($target, __log_module_path!(), __log_file!(), __log_line!()), + ); + } + }); + ($lvl:expr, $($arg:tt)+) => (log!(target: __log_module_path!(), $lvl, $($arg)+)) +} + +/// Logs a message at the error level. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::error; +/// +/// # fn main() { +/// let (err_info, port) = ("No connection", 22); +/// +/// error!("Error: {} on port {}", err_info, port); +/// error!(target: "app_events", "App Error: {}, Port: {}", err_info, 22); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! error { + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Error, $($arg)+); + ); + ($($arg:tt)+) => ( + log!($crate::Level::Error, $($arg)+); + ) +} + +/// Logs a message at the warn level. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::warn; +/// +/// # fn main() { +/// let warn_description = "Invalid Input"; +/// +/// warn!("Warning! {}!", warn_description); +/// warn!(target: "input_events", "App received warning: {}", warn_description); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! warn { + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Warn, $($arg)+); + ); + ($($arg:tt)+) => ( + log!($crate::Level::Warn, $($arg)+); + ) +} + +/// Logs a message at the info level. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::info; +/// +/// # fn main() { +/// # struct Connection { port: u32, speed: f32 } +/// let conn_info = Connection { port: 40, speed: 3.20 }; +/// +/// info!("Connected to port {} at {} Mb/s", conn_info.port, conn_info.speed); +/// info!(target: "connection_events", "Successfull connection, port: {}, speed: {}", +/// conn_info.port, conn_info.speed); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
info { + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Info, $($arg)+); + ); + ($($arg:tt)+) => ( + log!($crate::Level::Info, $($arg)+); + ) +} + +/// Logs a message at the debug level. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::debug; +/// +/// # fn main() { +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// debug!("New position: x: {}, y: {}", pos.x, pos.y); +/// debug!(target: "app_events", "New position: x: {}, y: {}", pos.x, pos.y); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! debug { + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Debug, $($arg)+); + ); + ($($arg:tt)+) => ( + log!($crate::Level::Debug, $($arg)+); + ) +} + +/// Logs a message at the trace level. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::trace; +/// +/// # fn main() { +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// trace!("Position is: x: {}, y: {}", pos.x, pos.y); +/// trace!(target: "app_events", "x is {} and y is {}", +/// if pos.x >= 0.0 { "positive" } else { "negative" }, +/// if pos.y >= 0.0 { "positive" } else { "negative" }); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! trace { + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Trace, $($arg)+); + ); + ($($arg:tt)+) => ( + log!($crate::Level::Trace, $($arg)+); + ) +} + +/// Determines if a message logged at the specified level in that module will +/// be logged. +/// +/// This can be used to avoid expensive computation of log message arguments if +/// the message would be ignored anyway. +/// +/// # Examples +/// +/// ```edition2018 +/// use log::Level::Debug; +/// use log::{debug, log_enabled}; +/// +/// # fn foo() { +/// if log_enabled!(Debug) { +/// let data = expensive_call(); +/// debug!("expensive debug data: {} {}", data.x, data.y); +/// } +/// if log_enabled!(target: "Global", Debug) { +/// let data = expensive_call(); +/// debug!(target: "Global", "expensive debug data: {} {}", data.x, data.y); +/// } +/// # } +/// # struct Data { x: u32, y: u32 } +/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } +/// # fn main() {} +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! log_enabled { + (target: $target:expr, $lvl:expr) => {{ + let lvl = $lvl; + lvl <= $crate::STATIC_MAX_LEVEL + && lvl <= $crate::max_level() + && $crate::__private_api_enabled(lvl, $target) + }}; + ($lvl:expr) => { + log_enabled!(target: __log_module_path!(), $lvl) + }; +} + +// The log macro above cannot invoke format_args directly because it uses +// local_inner_macros. A format_args invocation there would resolve to +// $crate::format_args which does not exist. Instead invoke format_args here +// outside of local_inner_macros so that it resolves (probably) to +// core::format_args or std::format_args. Same for the several macros that +// follow. +// +// This is a workaround until we drop support for pre-1.30 compilers. At that +// point we can remove use of local_inner_macros, use $crate:: when invoking +// local macros, and invoke format_args directly. +#[doc(hidden)] +#[macro_export] +macro_rules! __log_format_args { + ($($args:tt)*) => { + format_args!($($args)*) + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __log_module_path { + () => { + module_path!() + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! 
__log_file { + () => { + file!() + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __log_line { + () => { + line!() + }; +} diff -Nru cargo-0.44.1/vendor/log/src/serde.rs cargo-0.47.0/vendor/log/src/serde.rs --- cargo-0.44.1/vendor/log/src/serde.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/src/serde.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,329 +1,329 @@ -#![cfg(feature = "serde")] - -extern crate serde; -use self::serde::de::{ - Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, Unexpected, VariantAccess, - Visitor, -}; -use self::serde::ser::{Serialize, Serializer}; - -use {Level, LevelFilter, LOG_LEVEL_NAMES}; - -use std::fmt; -use std::str::{self, FromStr}; - -// The Deserialize impls are handwritten to be case insensitive using FromStr. - -impl Serialize for Level { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match *self { - Level::Error => serializer.serialize_unit_variant("Level", 0, "ERROR"), - Level::Warn => serializer.serialize_unit_variant("Level", 1, "WARN"), - Level::Info => serializer.serialize_unit_variant("Level", 2, "INFO"), - Level::Debug => serializer.serialize_unit_variant("Level", 3, "DEBUG"), - Level::Trace => serializer.serialize_unit_variant("Level", 4, "TRACE"), - } - } -} - -impl<'de> Deserialize<'de> for Level { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct LevelIdentifier; - - impl<'de> Visitor<'de> for LevelIdentifier { - type Value = Level; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level") - } - - fn visit_str(self, s: &str) -> Result - where - E: Error, - { - // Case insensitive. - FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES[1..])) - } - - fn visit_bytes(self, value: &[u8]) -> Result - where - E: Error, - { - let variant = str::from_utf8(value) - .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?; - - self.visit_str(variant) - } - } - - impl<'de> DeserializeSeed<'de> for LevelIdentifier { - type Value = Level; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_identifier(LevelIdentifier) - } - } - - struct LevelEnum; - - impl<'de> Visitor<'de> for LevelEnum { - type Value = Level; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level") - } - - fn visit_enum(self, value: A) -> Result - where - A: EnumAccess<'de>, - { - let (level, variant) = value.variant_seed(LevelIdentifier)?; - // Every variant is a unit variant. 
- variant.unit_variant()?; - Ok(level) - } - } - - deserializer.deserialize_enum("Level", &LOG_LEVEL_NAMES[1..], LevelEnum) - } -} - -impl Serialize for LevelFilter { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match *self { - LevelFilter::Off => serializer.serialize_unit_variant("LevelFilter", 0, "OFF"), - LevelFilter::Error => serializer.serialize_unit_variant("LevelFilter", 1, "ERROR"), - LevelFilter::Warn => serializer.serialize_unit_variant("LevelFilter", 2, "WARN"), - LevelFilter::Info => serializer.serialize_unit_variant("LevelFilter", 3, "INFO"), - LevelFilter::Debug => serializer.serialize_unit_variant("LevelFilter", 4, "DEBUG"), - LevelFilter::Trace => serializer.serialize_unit_variant("LevelFilter", 5, "TRACE"), - } - } -} - -impl<'de> Deserialize<'de> for LevelFilter { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct LevelFilterIdentifier; - - impl<'de> Visitor<'de> for LevelFilterIdentifier { - type Value = LevelFilter; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level filter") - } - - fn visit_str(self, s: &str) -> Result - where - E: Error, - { - // Case insensitive. - FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES)) - } - - fn visit_bytes(self, value: &[u8]) -> Result - where - E: Error, - { - let variant = str::from_utf8(value) - .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?; - - self.visit_str(variant) - } - } - - impl<'de> DeserializeSeed<'de> for LevelFilterIdentifier { - type Value = LevelFilter; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_identifier(LevelFilterIdentifier) - } - } - - struct LevelFilterEnum; - - impl<'de> Visitor<'de> for LevelFilterEnum { - type Value = LevelFilter; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level filter") - } - - fn visit_enum(self, value: A) -> Result - where - A: EnumAccess<'de>, - { - let (level_filter, variant) = value.variant_seed(LevelFilterIdentifier)?; - // Every variant is a unit variant. 
- variant.unit_variant()?; - Ok(level_filter) - } - } - - deserializer.deserialize_enum("LevelFilter", &LOG_LEVEL_NAMES, LevelFilterEnum) - } -} - -#[cfg(test)] -mod tests { - extern crate serde_test; - use self::serde_test::{assert_de_tokens, assert_de_tokens_error, assert_tokens, Token}; - - use {Level, LevelFilter}; - - fn level_token(variant: &'static str) -> Token { - Token::UnitVariant { - name: "Level", - variant: variant, - } - } - - fn level_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { - [ - Token::Enum { name: "Level" }, - Token::Bytes(variant), - Token::Unit, - ] - } - - fn level_filter_token(variant: &'static str) -> Token { - Token::UnitVariant { - name: "LevelFilter", - variant: variant, - } - } - - fn level_filter_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { - [ - Token::Enum { - name: "LevelFilter", - }, - Token::Bytes(variant), - Token::Unit, - ] - } - - #[test] - fn test_level_ser_de() { - let cases = [ - (Level::Error, [level_token("ERROR")]), - (Level::Warn, [level_token("WARN")]), - (Level::Info, [level_token("INFO")]), - (Level::Debug, [level_token("DEBUG")]), - (Level::Trace, [level_token("TRACE")]), - ]; - - for &(s, expected) in &cases { - assert_tokens(&s, &expected); - } - } - - #[test] - fn test_level_case_insensitive() { - let cases = [ - (Level::Error, [level_token("error")]), - (Level::Warn, [level_token("warn")]), - (Level::Info, [level_token("info")]), - (Level::Debug, [level_token("debug")]), - (Level::Trace, [level_token("trace")]), - ]; - - for &(s, expected) in &cases { - assert_de_tokens(&s, &expected); - } - } - - #[test] - fn test_level_de_bytes() { - let cases = [ - (Level::Error, level_bytes_tokens(b"ERROR")), - (Level::Warn, level_bytes_tokens(b"WARN")), - (Level::Info, level_bytes_tokens(b"INFO")), - (Level::Debug, level_bytes_tokens(b"DEBUG")), - (Level::Trace, level_bytes_tokens(b"TRACE")), - ]; - - for &(value, tokens) in &cases { - assert_de_tokens(&value, &tokens); - } - } - - #[test] - fn test_level_de_error() { - let msg = "unknown variant `errorx`, expected one of \ - `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; - assert_de_tokens_error::(&[level_token("errorx")], msg); - } - - #[test] - fn test_level_filter_ser_de() { - let cases = [ - (LevelFilter::Off, [level_filter_token("OFF")]), - (LevelFilter::Error, [level_filter_token("ERROR")]), - (LevelFilter::Warn, [level_filter_token("WARN")]), - (LevelFilter::Info, [level_filter_token("INFO")]), - (LevelFilter::Debug, [level_filter_token("DEBUG")]), - (LevelFilter::Trace, [level_filter_token("TRACE")]), - ]; - - for &(s, expected) in &cases { - assert_tokens(&s, &expected); - } - } - - #[test] - fn test_level_filter_case_insensitive() { - let cases = [ - (LevelFilter::Off, [level_filter_token("off")]), - (LevelFilter::Error, [level_filter_token("error")]), - (LevelFilter::Warn, [level_filter_token("warn")]), - (LevelFilter::Info, [level_filter_token("info")]), - (LevelFilter::Debug, [level_filter_token("debug")]), - (LevelFilter::Trace, [level_filter_token("trace")]), - ]; - - for &(s, expected) in &cases { - assert_de_tokens(&s, &expected); - } - } - - #[test] - fn test_level_filter_de_bytes() { - let cases = [ - (LevelFilter::Off, level_filter_bytes_tokens(b"OFF")), - (LevelFilter::Error, level_filter_bytes_tokens(b"ERROR")), - (LevelFilter::Warn, level_filter_bytes_tokens(b"WARN")), - (LevelFilter::Info, level_filter_bytes_tokens(b"INFO")), - (LevelFilter::Debug, level_filter_bytes_tokens(b"DEBUG")), - (LevelFilter::Trace, level_filter_bytes_tokens(b"TRACE")), - ]; - - for 
&(value, tokens) in &cases { - assert_de_tokens(&value, &tokens); - } - } - - #[test] - fn test_level_filter_de_error() { - let msg = "unknown variant `errorx`, expected one of \ - `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; - assert_de_tokens_error::(&[level_filter_token("errorx")], msg); - } -} +#![cfg(feature = "serde")] + +extern crate serde; +use self::serde::de::{ + Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, Unexpected, VariantAccess, + Visitor, +}; +use self::serde::ser::{Serialize, Serializer}; + +use {Level, LevelFilter, LOG_LEVEL_NAMES}; + +use std::fmt; +use std::str::{self, FromStr}; + +// The Deserialize impls are handwritten to be case insensitive using FromStr. + +impl Serialize for Level { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Level::Error => serializer.serialize_unit_variant("Level", 0, "ERROR"), + Level::Warn => serializer.serialize_unit_variant("Level", 1, "WARN"), + Level::Info => serializer.serialize_unit_variant("Level", 2, "INFO"), + Level::Debug => serializer.serialize_unit_variant("Level", 3, "DEBUG"), + Level::Trace => serializer.serialize_unit_variant("Level", 4, "TRACE"), + } + } +} + +impl<'de> Deserialize<'de> for Level { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct LevelIdentifier; + + impl<'de> Visitor<'de> for LevelIdentifier { + type Value = Level; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("log level") + } + + fn visit_str(self, s: &str) -> Result + where + E: Error, + { + // Case insensitive. + FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES[1..])) + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + let variant = str::from_utf8(value) + .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?; + + self.visit_str(variant) + } + } + + impl<'de> DeserializeSeed<'de> for LevelIdentifier { + type Value = Level; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_identifier(LevelIdentifier) + } + } + + struct LevelEnum; + + impl<'de> Visitor<'de> for LevelEnum { + type Value = Level; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("log level") + } + + fn visit_enum(self, value: A) -> Result + where + A: EnumAccess<'de>, + { + let (level, variant) = value.variant_seed(LevelIdentifier)?; + // Every variant is a unit variant. 
+ variant.unit_variant()?; + Ok(level) + } + } + + deserializer.deserialize_enum("Level", &LOG_LEVEL_NAMES[1..], LevelEnum) + } +} + +impl Serialize for LevelFilter { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + LevelFilter::Off => serializer.serialize_unit_variant("LevelFilter", 0, "OFF"), + LevelFilter::Error => serializer.serialize_unit_variant("LevelFilter", 1, "ERROR"), + LevelFilter::Warn => serializer.serialize_unit_variant("LevelFilter", 2, "WARN"), + LevelFilter::Info => serializer.serialize_unit_variant("LevelFilter", 3, "INFO"), + LevelFilter::Debug => serializer.serialize_unit_variant("LevelFilter", 4, "DEBUG"), + LevelFilter::Trace => serializer.serialize_unit_variant("LevelFilter", 5, "TRACE"), + } + } +} + +impl<'de> Deserialize<'de> for LevelFilter { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct LevelFilterIdentifier; + + impl<'de> Visitor<'de> for LevelFilterIdentifier { + type Value = LevelFilter; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("log level filter") + } + + fn visit_str(self, s: &str) -> Result + where + E: Error, + { + // Case insensitive. + FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES)) + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + let variant = str::from_utf8(value) + .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?; + + self.visit_str(variant) + } + } + + impl<'de> DeserializeSeed<'de> for LevelFilterIdentifier { + type Value = LevelFilter; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_identifier(LevelFilterIdentifier) + } + } + + struct LevelFilterEnum; + + impl<'de> Visitor<'de> for LevelFilterEnum { + type Value = LevelFilter; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("log level filter") + } + + fn visit_enum(self, value: A) -> Result + where + A: EnumAccess<'de>, + { + let (level_filter, variant) = value.variant_seed(LevelFilterIdentifier)?; + // Every variant is a unit variant. 
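// --- Editorial sketch of the handwritten serde impls above, assuming the
// --- `log` crate is built with its `serde` feature and that `serde_json` is
// --- pulled in separately for the illustration (it is not a dependency of
// --- this crate): levels serialize as upper-case unit variants and
// --- deserialize case-insensitively through FromStr.
use log::{Level, LevelFilter};

fn main() -> Result<(), serde_json::Error> {
    assert_eq!(serde_json::to_string(&Level::Info)?, "\"INFO\"");
    assert_eq!(serde_json::to_string(&LevelFilter::Off)?, "\"OFF\"");

    // Deserialization accepts lower-case input as well.
    assert_eq!(serde_json::from_str::<Level>("\"debug\"")?, Level::Debug);
    assert_eq!(serde_json::from_str::<LevelFilter>("\"off\"")?, LevelFilter::Off);
    Ok(())
}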
+ variant.unit_variant()?; + Ok(level_filter) + } + } + + deserializer.deserialize_enum("LevelFilter", &LOG_LEVEL_NAMES, LevelFilterEnum) + } +} + +#[cfg(test)] +mod tests { + extern crate serde_test; + use self::serde_test::{assert_de_tokens, assert_de_tokens_error, assert_tokens, Token}; + + use {Level, LevelFilter}; + + fn level_token(variant: &'static str) -> Token { + Token::UnitVariant { + name: "Level", + variant: variant, + } + } + + fn level_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { + [ + Token::Enum { name: "Level" }, + Token::Bytes(variant), + Token::Unit, + ] + } + + fn level_filter_token(variant: &'static str) -> Token { + Token::UnitVariant { + name: "LevelFilter", + variant: variant, + } + } + + fn level_filter_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { + [ + Token::Enum { + name: "LevelFilter", + }, + Token::Bytes(variant), + Token::Unit, + ] + } + + #[test] + fn test_level_ser_de() { + let cases = [ + (Level::Error, [level_token("ERROR")]), + (Level::Warn, [level_token("WARN")]), + (Level::Info, [level_token("INFO")]), + (Level::Debug, [level_token("DEBUG")]), + (Level::Trace, [level_token("TRACE")]), + ]; + + for &(s, expected) in &cases { + assert_tokens(&s, &expected); + } + } + + #[test] + fn test_level_case_insensitive() { + let cases = [ + (Level::Error, [level_token("error")]), + (Level::Warn, [level_token("warn")]), + (Level::Info, [level_token("info")]), + (Level::Debug, [level_token("debug")]), + (Level::Trace, [level_token("trace")]), + ]; + + for &(s, expected) in &cases { + assert_de_tokens(&s, &expected); + } + } + + #[test] + fn test_level_de_bytes() { + let cases = [ + (Level::Error, level_bytes_tokens(b"ERROR")), + (Level::Warn, level_bytes_tokens(b"WARN")), + (Level::Info, level_bytes_tokens(b"INFO")), + (Level::Debug, level_bytes_tokens(b"DEBUG")), + (Level::Trace, level_bytes_tokens(b"TRACE")), + ]; + + for &(value, tokens) in &cases { + assert_de_tokens(&value, &tokens); + } + } + + #[test] + fn test_level_de_error() { + let msg = "unknown variant `errorx`, expected one of \ + `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; + assert_de_tokens_error::(&[level_token("errorx")], msg); + } + + #[test] + fn test_level_filter_ser_de() { + let cases = [ + (LevelFilter::Off, [level_filter_token("OFF")]), + (LevelFilter::Error, [level_filter_token("ERROR")]), + (LevelFilter::Warn, [level_filter_token("WARN")]), + (LevelFilter::Info, [level_filter_token("INFO")]), + (LevelFilter::Debug, [level_filter_token("DEBUG")]), + (LevelFilter::Trace, [level_filter_token("TRACE")]), + ]; + + for &(s, expected) in &cases { + assert_tokens(&s, &expected); + } + } + + #[test] + fn test_level_filter_case_insensitive() { + let cases = [ + (LevelFilter::Off, [level_filter_token("off")]), + (LevelFilter::Error, [level_filter_token("error")]), + (LevelFilter::Warn, [level_filter_token("warn")]), + (LevelFilter::Info, [level_filter_token("info")]), + (LevelFilter::Debug, [level_filter_token("debug")]), + (LevelFilter::Trace, [level_filter_token("trace")]), + ]; + + for &(s, expected) in &cases { + assert_de_tokens(&s, &expected); + } + } + + #[test] + fn test_level_filter_de_bytes() { + let cases = [ + (LevelFilter::Off, level_filter_bytes_tokens(b"OFF")), + (LevelFilter::Error, level_filter_bytes_tokens(b"ERROR")), + (LevelFilter::Warn, level_filter_bytes_tokens(b"WARN")), + (LevelFilter::Info, level_filter_bytes_tokens(b"INFO")), + (LevelFilter::Debug, level_filter_bytes_tokens(b"DEBUG")), + (LevelFilter::Trace, level_filter_bytes_tokens(b"TRACE")), + ]; + + for 
&(value, tokens) in &cases { + assert_de_tokens(&value, &tokens); + } + } + + #[test] + fn test_level_filter_de_error() { + let msg = "unknown variant `errorx`, expected one of \ + `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; + assert_de_tokens_error::(&[level_filter_token("errorx")], msg); + } +} diff -Nru cargo-0.44.1/vendor/log/tests/filters.rs cargo-0.47.0/vendor/log/tests/filters.rs --- cargo-0.44.1/vendor/log/tests/filters.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/log/tests/filters.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,72 +1,72 @@ -#[macro_use] -extern crate log; - -use log::{Level, LevelFilter, Log, Metadata, Record}; -use std::sync::{Arc, Mutex}; - -#[cfg(feature = "std")] -use log::set_boxed_logger; - -#[cfg(not(feature = "std"))] -fn set_boxed_logger(logger: Box) -> Result<(), log::SetLoggerError> { - log::set_logger(unsafe { &*Box::into_raw(logger) }) -} - -struct State { - last_log: Mutex>, -} - -struct Logger(Arc); - -impl Log for Logger { - fn enabled(&self, _: &Metadata) -> bool { - true - } - - fn log(&self, record: &Record) { - *self.0.last_log.lock().unwrap() = Some(record.level()); - } - fn flush(&self) {} -} - -fn main() { - let me = Arc::new(State { - last_log: Mutex::new(None), - }); - let a = me.clone(); - set_boxed_logger(Box::new(Logger(me))).unwrap(); - - test(&a, LevelFilter::Off); - test(&a, LevelFilter::Error); - test(&a, LevelFilter::Warn); - test(&a, LevelFilter::Info); - test(&a, LevelFilter::Debug); - test(&a, LevelFilter::Trace); -} - -fn test(a: &State, filter: LevelFilter) { - log::set_max_level(filter); - error!(""); - last(&a, t(Level::Error, filter)); - warn!(""); - last(&a, t(Level::Warn, filter)); - info!(""); - last(&a, t(Level::Info, filter)); - debug!(""); - last(&a, t(Level::Debug, filter)); - trace!(""); - last(&a, t(Level::Trace, filter)); - - fn t(lvl: Level, filter: LevelFilter) -> Option { - if lvl <= filter { - Some(lvl) - } else { - None - } - } -} - -fn last(state: &State, expected: Option) { - let lvl = state.last_log.lock().unwrap().take(); - assert_eq!(lvl, expected); -} +#[macro_use] +extern crate log; + +use log::{Level, LevelFilter, Log, Metadata, Record}; +use std::sync::{Arc, Mutex}; + +#[cfg(feature = "std")] +use log::set_boxed_logger; + +#[cfg(not(feature = "std"))] +fn set_boxed_logger(logger: Box) -> Result<(), log::SetLoggerError> { + log::set_logger(Box::leak(logger)) +} + +struct State { + last_log: Mutex>, +} + +struct Logger(Arc); + +impl Log for Logger { + fn enabled(&self, _: &Metadata) -> bool { + true + } + + fn log(&self, record: &Record) { + *self.0.last_log.lock().unwrap() = Some(record.level()); + } + fn flush(&self) {} +} + +fn main() { + let me = Arc::new(State { + last_log: Mutex::new(None), + }); + let a = me.clone(); + set_boxed_logger(Box::new(Logger(me))).unwrap(); + + test(&a, LevelFilter::Off); + test(&a, LevelFilter::Error); + test(&a, LevelFilter::Warn); + test(&a, LevelFilter::Info); + test(&a, LevelFilter::Debug); + test(&a, LevelFilter::Trace); +} + +fn test(a: &State, filter: LevelFilter) { + log::set_max_level(filter); + error!(""); + last(&a, t(Level::Error, filter)); + warn!(""); + last(&a, t(Level::Warn, filter)); + info!(""); + last(&a, t(Level::Info, filter)); + debug!(""); + last(&a, t(Level::Debug, filter)); + trace!(""); + last(&a, t(Level::Trace, filter)); + + fn t(lvl: Level, filter: LevelFilter) -> Option { + if lvl <= filter { + Some(lvl) + } else { + None + } + } +} + +fn last(state: &State, expected: Option) { + let lvl = 
state.last_log.lock().unwrap().take(); + assert_eq!(lvl, expected); +} diff -Nru cargo-0.44.1/vendor/log/tests/macros.rs cargo-0.47.0/vendor/log/tests/macros.rs --- cargo-0.44.1/vendor/log/tests/macros.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/tests/macros.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,36 @@ +#[macro_use] +extern crate log; + +#[test] +fn base() { + info!("hello"); + info!("hello",); +} + +#[test] +fn base_expr_context() { + let _ = info!("hello"); +} + +#[test] +fn with_args() { + info!("hello {}", "cats"); + info!("hello {}", "cats",); + info!("hello {}", "cats",); +} + +#[test] +fn with_args_expr_context() { + match "cats" { + cats => info!("hello {}", cats), + }; +} + +#[test] +fn with_named_args() { + let cats = "cats"; + + info!("hello {cats}", cats = cats); + info!("hello {cats}", cats = cats,); + info!("hello {cats}", cats = cats,); +} diff -Nru cargo-0.44.1/vendor/log/triagebot.toml cargo-0.47.0/vendor/log/triagebot.toml --- cargo-0.44.1/vendor/log/triagebot.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/log/triagebot.toml 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +[assign] diff -Nru cargo-0.44.1/vendor/maybe-uninit/build.rs cargo-0.47.0/vendor/maybe-uninit/build.rs --- cargo-0.44.1/vendor/maybe-uninit/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/build.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -use std::env; -use std::process::Command; -use std::str::FromStr; - -fn main() { - let minor = match rustc_minor_version() { - Some(minor) => minor, - None => return, - }; - if minor >= 22 { - println!("cargo:rustc-cfg=derive_copy"); - } - if minor >= 28 { - println!("cargo:rustc-cfg=repr_transparent"); - } - if minor >= 36 { - println!("cargo:rustc-cfg=native_uninit"); - } - -} - -fn rustc_minor_version() -> Option { - let rustc = env::var_os("RUSTC"); - - let output = rustc.and_then(|rustc| { - Command::new(rustc).arg("--version").output().ok() - }); - - let version = output.and_then(|output| { - String::from_utf8(output.stdout).ok() - }); - - let version = if let Some(version) = version { - version - } else { - return None; - }; - - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - - let next = match pieces.next() { - Some(next) => next, - None => return None, - }; - - u32::from_str(next).ok() -} diff -Nru cargo-0.44.1/vendor/maybe-uninit/.cargo-checksum.json cargo-0.47.0/vendor/maybe-uninit/.cargo-checksum.json --- cargo-0.44.1/vendor/maybe-uninit/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{},"package":"60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/maybe-uninit/Cargo.toml cargo-0.47.0/vendor/maybe-uninit/Cargo.toml --- cargo-0.44.1/vendor/maybe-uninit/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
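Although filters.rs above is removed and re-added in full, the only visible content change is its no-std fallback for `set_boxed_logger`, which now leaks the boxed logger with `Box::leak` instead of `unsafe { &*Box::into_raw(..) }`. A minimal standalone sketch of why the two forms are interchangeable (the helper names below are illustrative, not part of the vendored crate):

    // Both helpers hand back a reference that lives for the rest of the
    // program, which is what `log::set_logger` requires.
    fn leak_old_style<T: 'static>(b: Box<T>) -> &'static T {
        // Pre-`Box::leak` idiom: convert to a raw pointer and re-borrow it.
        unsafe { &*Box::into_raw(b) }
    }

    fn leak_new_style<T: 'static>(b: Box<T>) -> &'static T {
        // `Box::leak` (stable since Rust 1.26) does the same without `unsafe`.
        Box::leak(b)
    }

    fn main() {
        let old: &'static u32 = leak_old_style(Box::new(1));
        let new: &'static u32 = leak_new_style(Box::new(2));
        assert_eq!(*old + *new, 3);
    }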
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "maybe-uninit" -version = "2.0.0" -authors = ["est31 ", "The Rust Project Developers"] -description = "MaybeUninit for friends of backwards compatibility" -readme = "README.md" -license = "Apache-2.0 OR MIT" -repository = "https://github.com/est31/maybe-uninit" - -[dependencies] diff -Nru cargo-0.44.1/vendor/maybe-uninit/LICENSE-APACHE cargo-0.47.0/vendor/maybe-uninit/LICENSE-APACHE --- cargo-0.44.1/vendor/maybe-uninit/LICENSE-APACHE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.44.1/vendor/maybe-uninit/LICENSE-MIT cargo-0.47.0/vendor/maybe-uninit/LICENSE-MIT --- cargo-0.44.1/vendor/maybe-uninit/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2010 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/maybe-uninit/README.md cargo-0.47.0/vendor/maybe-uninit/README.md --- cargo-0.44.1/vendor/maybe-uninit/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# maybe-uninit - -Quite often, uses of `std::mem::uninitialized()` end up in unsound code. -Therefore, the `MaybeUninit` union has been added to `std::mem` and `std::mem::uninitialized()` is being deprecated. -However, `MaybeUninit` has been added quite recently. -Sometimes you might want to support older versions of Rust as well. -Here is where `maybe-uninit` comes in: it supports stable Rust versions starting with 1.20.0. - -Sadly, a feature-complete implementation of `MaybeUninit` is not possible on stable Rust. 
-Therefore, the library offers the guarantees of `MaybeUninit` in a staged fashion: - -* Rust 1.36.0 onward: `MaybeUninit` implementation of Rust stable is being re-exported - -* Rust 1.22.x - 1.35.0: No panicing on uninhabited types, - unsoundness when used with types like `bool` or enums. - However, there is protection from accidentially `Drop`ing e.g. during unwind! - -* Rust 1.20.x - 1.21.x: No support for Copy/Clone of `MaybeUninit`, - even if `T` impls `Copy` or even `Clone`. diff -Nru cargo-0.44.1/vendor/maybe-uninit/src/lib.rs cargo-0.47.0/vendor/maybe-uninit/src/lib.rs --- cargo-0.44.1/vendor/maybe-uninit/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -#![no_std] - -#[cfg(not(native_uninit))] -mod maybe_uninit; - -#[cfg(not(native_uninit))] -pub use maybe_uninit::MaybeUninit; - -#[cfg(native_uninit)] -pub use core::mem::MaybeUninit; diff -Nru cargo-0.44.1/vendor/maybe-uninit/src/maybe_uninit.rs cargo-0.47.0/vendor/maybe-uninit/src/maybe_uninit.rs --- cargo-0.44.1/vendor/maybe-uninit/src/maybe_uninit.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/src/maybe_uninit.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,594 +0,0 @@ -//use core::intrinsics; -use core::mem::ManuallyDrop; -use core::ptr; -use core::mem::uninitialized; - -/// A wrapper type to construct uninitialized instances of `T`. -/// -/// # Initialization invariant -/// -/// The compiler, in general, assumes that variables are properly initialized -/// at their respective type. For example, a variable of reference type must -/// be aligned and non-NULL. This is an invariant that must *always* be upheld, -/// even in unsafe code. As a consequence, zero-initializing a variable of reference -/// type causes instantaneous [undefined behavior][ub], no matter whether that reference -/// ever gets used to access memory: -/// -/// ```rust,no_run -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::mem; -/// -/// let x: &i32 = unsafe { mem::zeroed() }; // undefined behavior! -/// // The equivalent code with `MaybeUninit<&i32>`: -/// let x: &i32 = unsafe { MaybeUninit::zeroed().assume_init() }; // undefined behavior! -/// # } -/// ``` -/// -/// This is exploited by the compiler for various optimizations, such as eliding -/// run-time checks and optimizing `enum` layout. -/// -/// Similarly, entirely uninitialized memory may have any content, while a `bool` must -/// always be `true` or `false`. Hence, creating an uninitialized `bool` is undefined behavior: -/// -/// ```rust,no_run -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::mem; -/// -/// let b: bool = unsafe { mem::uninitialized() }; // undefined behavior! -/// // The equivalent code with `MaybeUninit`: -/// let b: bool = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! -/// # } -/// ``` -/// -/// Moreover, uninitialized memory is special in that the compiler knows that -/// it does not have a fixed value. This makes it undefined behavior to have -/// uninitialized data in a variable even if that variable has an integer type, -/// which otherwise can hold any *fixed* bit pattern: -/// -/// ```rust,no_run -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::mem; -/// -/// let x: i32 = unsafe { mem::uninitialized() }; // undefined behavior! 
-/// // The equivalent code with `MaybeUninit`: -/// let x: i32 = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! -/// # } -/// ``` -/// (Notice that the rules around uninitialized integers are not finalized yet, but -/// until they are, it is advisable to avoid them.) -/// -/// On top of that, remember that most types have additional invariants beyond merely -/// being considered initialized at the type level. For example, a `1`-initialized [`Vec`] -/// is considered initialized because the only requirement the compiler knows about it -/// is that the data pointer must be non-null. Creating such a `Vec` does not cause -/// *immediate* undefined behavior, but will cause undefined behavior with most -/// safe operations (including dropping it). -/// -/// [`Vec`]: ../../std/vec/struct.Vec.html -/// -/// # Examples -/// -/// `MaybeUninit` serves to enable unsafe code to deal with uninitialized data. -/// It is a signal to the compiler indicating that the data here might *not* -/// be initialized: -/// -/// ```rust -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::ptr::write; -/// -/// // Create an explicitly uninitialized reference. The compiler knows that data inside -/// // a `MaybeUninit` may be invalid, and hence this is not UB: -/// let mut x = MaybeUninit::<&i32>::uninit(); -/// // Set it to a valid value. -/// const V: &'static i32 = &0; -/// unsafe { write(x.as_mut_ptr(), V); } -/// // Extract the initialized data -- this is only allowed *after* properly -/// // initializing `x`! -/// let x = unsafe { x.assume_init() }; -/// # } -/// ``` -/// -/// The compiler then knows to not make any incorrect assumptions or optimizations on this code. -/// -/// You can think of `MaybeUninit` as being a bit like `Option` but without -/// any of the run-time tracking and without any of the safety checks. -/// -/// ## out-pointers -/// -/// You can use `MaybeUninit` to implement "out-pointers": instead of returning data -/// from a function, pass it a pointer to some (uninitialized) memory to put the -/// result into. This can be useful when it is important for the caller to control -/// how the memory the result is stored in gets allocated, and you want to avoid -/// unnecessary moves. -/// -/// ``` -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::ptr; -/// -/// unsafe fn make_vec(out: *mut Vec) { -/// // `write` does not drop the old contents, which is important. -/// ptr::write(out, vec![1, 2, 3]); -/// } -/// -/// let mut v = MaybeUninit::uninit(); -/// unsafe { make_vec(v.as_mut_ptr()); } -/// // Now we know `v` is initialized! This also makes sure the vector gets -/// // properly dropped. -/// let v = unsafe { v.assume_init() }; -/// assert_eq!(&v, &[1, 2, 3]); -/// # } -/// ``` -/// -/// ## Initializing an array element-by-element -/// -/// `MaybeUninit` can be used to initialize a large array element-by-element: -/// -/// ``` -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::mem; -/// use std::ptr; -/// -/// let data = { -/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is -/// // safe because the type we are claiming to have initialized here is a -/// // bunch of `MaybeUninit`s, which do not require initialization. 
-/// let mut data: [MaybeUninit>; 1000] = unsafe { -/// MaybeUninit::uninit().assume_init() -/// }; -/// -/// // Dropping a `MaybeUninit` does nothing, so if there is a panic during this loop, -/// // we have a memory leak, but there is no memory safety issue. -/// for elem in &mut data[..] { -/// unsafe { ptr::write(elem.as_mut_ptr(), vec![42]); } -/// } -/// -/// // Everything is initialized. Transmute the array to the -/// // initialized type. -/// unsafe { mem::transmute::<_, [Vec; 1000]>(data) } -/// }; -/// -/// assert_eq!(&data[0], &[42]); -/// # } -/// ``` -/// -/// You can also work with partially initialized arrays, which could -/// be found in low-level datastructures. -/// -/// ``` -/// # extern crate maybe_uninit; -/// # fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::ptr; -/// -/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is -/// // safe because the type we are claiming to have initialized here is a -/// // bunch of `MaybeUninit`s, which do not require initialization. -/// let mut data: [MaybeUninit; 1000] = unsafe { MaybeUninit::uninit().assume_init() }; -/// // Count the number of elements we have assigned. -/// let mut data_len: usize = 0; -/// -/// for elem in &mut data[0..500] { -/// unsafe { ptr::write(elem.as_mut_ptr(), String::from("hello")); } -/// data_len += 1; -/// } -/// -/// // For each item in the array, drop if we allocated it. -/// for elem in &mut data[0..data_len] { -/// unsafe { ptr::drop_in_place(elem.as_mut_ptr()); } -/// } -/// # } -/// ``` -/// -/// ## Initializing a struct field-by-field -/// -/// There is currently no supported way to create a raw pointer or reference -/// to a field of a struct inside `MaybeUninit`. That means it is not possible -/// to create a struct by calling `MaybeUninit::uninit::()` and then writing -/// to its fields. -/// -/// [ub]: ../../reference/behavior-considered-undefined.html -/// -/// # Layout -/// -/// `MaybeUninit` is guaranteed to have the same size, alignment, and ABI as `T`: -/// -/// ```rust -/// # extern crate maybe_uninit; -/// # #[cfg(not(derive_copy))] fn main() {} -/// # #[cfg(derive_copy)] fn main() { -/// use maybe_uninit::MaybeUninit; -/// use std::mem::{size_of, align_of}; -/// assert_eq!(size_of::>(), size_of::()); -/// assert_eq!(align_of::>(), align_of::()); -/// # } -/// ``` -/// -/// However remember that a type *containing* a `MaybeUninit` is not necessarily the same -/// layout; Rust does not in general guarantee that the fields of a `Foo` have the same order as -/// a `Foo` even if `T` and `U` have the same size and alignment. Furthermore because any bit -/// value is valid for a `MaybeUninit` the compiler can't apply non-zero/niche-filling -/// optimizations, potentially resulting in a larger size: -/// -/// ```no_run -/// # extern crate maybe_uninit; -/// # fn main() { -/// # use maybe_uninit::MaybeUninit; -/// # use std::mem::size_of; -/// assert_eq!(size_of::>(), 1); -/// assert_eq!(size_of::>>(), 2); -/// # } -/// ``` -/// -/// If `T` is FFI-safe, then so is `MaybeUninit`. -/// -/// While `MaybeUninit` is `#[repr(transparent)]` (indicating it guarantees the same size, -/// alignment, and ABI as `T`), this does *not* change any of the previous caveats. `Option` and -/// `Option>` may still have different sizes, and types containing a field of type -/// `T` may be laid out (and sized) differently than if that field were `MaybeUninit`. 
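The layout notes in the removed documentation above reduce to two checks that, in practice, also hold for the standard-library type the polyfill deferred to on newer compilers. A small standalone illustration using `std::mem::MaybeUninit`:

    use std::mem::{align_of, size_of, MaybeUninit};

    fn main() {
        // Same size and alignment as the wrapped type.
        assert_eq!(size_of::<MaybeUninit<u64>>(), size_of::<u64>());
        assert_eq!(align_of::<MaybeUninit<u64>>(), align_of::<u64>());

        // But any bit pattern is permitted, so niche-filling optimizations
        // such as packing `Option<bool>` into one byte no longer apply.
        assert_eq!(size_of::<Option<bool>>(), 1);
        assert_eq!(size_of::<Option<MaybeUninit<bool>>>(), 2);
    }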
-/// `MaybeUninit` is a union type, and `#[repr(transparent)]` on unions is unstable (see [the -/// tracking issue](https://github.com/rust-lang/rust/issues/60405)). Over time, the exact -/// guarantees of `#[repr(transparent)]` on unions may evolve, and `MaybeUninit` may or may not -/// remain `#[repr(transparent)]`. That said, `MaybeUninit` will *always* guarantee that it has -/// the same size, alignment, and ABI as `T`; it's just that the way `MaybeUninit` implements that -/// guarantee may evolve. -#[cfg_attr(derive_copy, derive(Copy))] -#[cfg_attr(repr_transparent, repr(transparent))] -#[cfg_attr(not(repr_transparent), repr(C))] -pub struct MaybeUninit { - value: ManuallyDrop, -} - -#[cfg(derive_copy)] -impl Clone for MaybeUninit { - #[inline(always)] - fn clone(&self) -> Self { - // Not calling `T::clone()`, we cannot know if we are initialized enough for that. - *self - } -} - -impl MaybeUninit { - /// Creates a new `MaybeUninit` initialized with the given value. - /// It is safe to call [`assume_init`] on the return value of this function. - /// - /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. - /// It is your responsibility to make sure `T` gets dropped if it got initialized. - /// - /// [`assume_init`]: #method.assume_init - #[inline(always)] - pub fn new(val: T) -> MaybeUninit { - MaybeUninit { value: ManuallyDrop::new(val) } - } - - /// Creates a new `MaybeUninit` in an uninitialized state. - /// - /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. - /// It is your responsibility to make sure `T` gets dropped if it got initialized. - /// - /// See the [type-level documentation][type] for some examples. - /// - /// [type]: union.MaybeUninit.html - #[inline(always)] - pub fn uninit() -> MaybeUninit { - unsafe { MaybeUninit { value: uninitialized() } } - } - - /// Creates a new `MaybeUninit` in an uninitialized state, with the memory being - /// filled with `0` bytes. It depends on `T` whether that already makes for - /// proper initialization. For example, `MaybeUninit::zeroed()` is initialized, - /// but `MaybeUninit<&'static i32>::zeroed()` is not because references must not - /// be null. - /// - /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. - /// It is your responsibility to make sure `T` gets dropped if it got initialized. - /// - /// # Example - /// - /// Correct usage of this function: initializing a struct with zero, where all - /// fields of the struct can hold the bit-pattern 0 as a valid value. - /// - /// ```rust - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// - /// let x = MaybeUninit::<(u8, bool)>::zeroed(); - /// let x = unsafe { x.assume_init() }; - /// assert_eq!(x, (0, false)); - /// # } - /// ``` - /// - /// *Incorrect* usage of this function: initializing a struct with zero, where some fields - /// cannot hold 0 as a valid value. - /// - /// ```rust,no_run - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// - /// enum NotZero { One = 1, Two = 2 }; - /// - /// let x = MaybeUninit::<(u8, NotZero)>::zeroed(); - /// let x = unsafe { x.assume_init() }; - /// // Inside a pair, we create a `NotZero` that does not have a valid discriminant. - /// // This is undefined behavior. - /// # } - /// ``` - #[inline] - pub fn zeroed() -> MaybeUninit { - let mut u = MaybeUninit::::uninit(); - unsafe { - ptr::write_bytes(u.as_mut_ptr(), 0u8, 1); - } - u - } - - /* - /// Sets the value of the `MaybeUninit`. 
This overwrites any previous value - /// without dropping it, so be careful not to use this twice unless you want to - /// skip running the destructor. For your convenience, this also returns a mutable - /// reference to the (now safely initialized) contents of `self`. - #[unstable(feature = "maybe_uninit_extra", issue = "53491")] - #[inline(always)] - pub fn write(&mut self, val: T) -> &mut T { - unsafe { - self.value = ManuallyDrop::new(val); - self.get_mut() - } - } - */ - - /// Gets a pointer to the contained value. Reading from this pointer or turning it - /// into a reference is undefined behavior unless the `MaybeUninit` is initialized. - /// Writing to memory that this pointer (non-transitively) points to is undefined behavior - /// (except inside an `UnsafeCell`). - /// - /// # Examples - /// - /// Correct usage of this method: - /// - /// ```rust - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// use std::ptr; - /// - /// let mut x = MaybeUninit::>::uninit(); - /// unsafe { ptr::write(x.as_mut_ptr(), vec![0,1,2]); } - /// // Create a reference into the `MaybeUninit`. This is okay because we initialized it. - /// let x_vec = unsafe { &*x.as_ptr() }; - /// assert_eq!(x_vec.len(), 3); - /// # } - /// ``` - /// - /// *Incorrect* usage of this method: - /// - /// ```rust,no_run - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// - /// let x = MaybeUninit::>::uninit(); - /// let x_vec = unsafe { &*x.as_ptr() }; - /// // We have created a reference to an uninitialized vector! This is undefined behavior. - /// # } - /// ``` - /// - /// (Notice that the rules around references to uninitialized data are not finalized yet, but - /// until they are, it is advisable to avoid them.) - #[inline(always)] - pub fn as_ptr(&self) -> *const T { - &*self.value as *const T - } - - /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it - /// into a reference is undefined behavior unless the `MaybeUninit` is initialized. - /// - /// # Examples - /// - /// Correct usage of this method: - /// - /// ```rust - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// use std::ptr; - /// - /// let mut x = MaybeUninit::>::uninit(); - /// unsafe { ptr::write(x.as_mut_ptr(), vec![0,1,2]); } - /// // Create a reference into the `MaybeUninit>`. - /// // This is okay because we initialized it. - /// let x_vec = unsafe { &mut *x.as_mut_ptr() }; - /// x_vec.push(3); - /// assert_eq!(x_vec.len(), 4); - /// # } - /// ``` - /// - /// *Incorrect* usage of this method: - /// - /// ```rust,no_run - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// - /// let mut x = MaybeUninit::>::uninit(); - /// let x_vec = unsafe { &mut *x.as_mut_ptr() }; - /// // We have created a reference to an uninitialized vector! This is undefined behavior. - /// # } - /// ``` - /// - /// (Notice that the rules around references to uninitialized data are not finalized yet, but - /// until they are, it is advisable to avoid them.) - #[inline(always)] - pub fn as_mut_ptr(&mut self) -> *mut T { - &mut *self.value as *mut T - } - - /// Extracts the value from the `MaybeUninit` container. This is a great way - /// to ensure that the data will get dropped, because the resulting `T` is - /// subject to the usual drop handling. 
- /// - /// # Safety - /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state. Calling this when the content is not yet fully initialized causes immediate undefined - /// behavior. The [type-level documentation][inv] contains more information about - /// this initialization invariant. - /// - /// [inv]: #initialization-invariant - /// - /// # Examples - /// - /// Correct usage of this method: - /// - /// ```rust - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// use std::ptr::write; - /// - /// let mut x = MaybeUninit::::uninit(); - /// unsafe { write(x.as_mut_ptr(), true); } - /// let x_init = unsafe { x.assume_init() }; - /// assert_eq!(x_init, true); - /// # } - /// ``` - /// - /// *Incorrect* usage of this method: - /// - /// ```rust,no_run - /// # extern crate maybe_uninit; - /// # fn main() { - /// use maybe_uninit::MaybeUninit; - /// - /// let x = MaybeUninit::>::uninit(); - /// let x_init = unsafe { x.assume_init() }; - /// // `x` had not been initialized yet, so this last line caused undefined behavior. - /// # } - /// ``` - #[inline(always)] - pub unsafe fn assume_init(self) -> T { - //intrinsics::panic_if_uninhabited::(); - ManuallyDrop::into_inner(self.value) - } - - /* - /// Reads the value from the `MaybeUninit` container. The resulting `T` is subject - /// to the usual drop handling. - /// - /// Whenever possible, it is preferrable to use [`assume_init`] instead, which - /// prevents duplicating the content of the `MaybeUninit`. - /// - /// # Safety - /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state. Calling this when the content is not yet fully initialized causes undefined - /// behavior. The [type-level documentation][inv] contains more information about - /// this initialization invariant. - /// - /// Moreover, this leaves a copy of the same data behind in the `MaybeUninit`. When using - /// multiple copies of the data (by calling `read` multiple times, or first - /// calling `read` and then [`assume_init`]), it is your responsibility - /// to ensure that that data may indeed be duplicated. - /// - /// [inv]: #initialization-invariant - /// [`assume_init`]: #method.assume_init - /// - /// # Examples - /// - /// Correct usage of this method: - /// - /// ```rust - /// #![feature(maybe_uninit_extra)] - /// use std::mem::MaybeUninit; - /// - /// let mut x = MaybeUninit::::uninit(); - /// x.write(13); - /// let x1 = unsafe { x.read() }; - /// // `u32` is `Copy`, so we may read multiple times. - /// let x2 = unsafe { x.read() }; - /// assert_eq!(x1, x2); - /// - /// let mut x = MaybeUninit::>>::uninit(); - /// x.write(None); - /// let x1 = unsafe { x.read() }; - /// // Duplicating a `None` value is okay, so we may read multiple times. - /// let x2 = unsafe { x.read() }; - /// assert_eq!(x1, x2); - /// ``` - /// - /// *Incorrect* usage of this method: - /// - /// ```rust,no_run - /// #![feature(maybe_uninit_extra)] - /// use std::mem::MaybeUninit; - /// - /// let mut x = MaybeUninit::>>::uninit(); - /// x.write(Some(vec![0,1,2])); - /// let x1 = unsafe { x.read() }; - /// let x2 = unsafe { x.read() }; - /// // We now created two copies of the same vector, leading to a double-free when - /// // they both get dropped! 
- /// ``` - #[unstable(feature = "maybe_uninit_extra", issue = "53491")] - #[inline(always)] - pub unsafe fn read(&self) -> T { - intrinsics::panic_if_uninhabited::(); - self.as_ptr().read() - } - - /// Gets a reference to the contained value. - /// - /// # Safety - /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state. Calling this when the content is not yet fully initialized causes undefined - /// behavior. - #[unstable(feature = "maybe_uninit_ref", issue = "53491")] - #[inline(always)] - pub unsafe fn get_ref(&self) -> &T { - &*self.value - } - - /// Gets a mutable reference to the contained value. - /// - /// # Safety - /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state. Calling this when the content is not yet fully initialized causes undefined - /// behavior. - // FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references - // to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make - // a final decision about the rules before stabilization. - #[unstable(feature = "maybe_uninit_ref", issue = "53491")] - #[inline(always)] - pub unsafe fn get_mut(&mut self) -> &mut T { - &mut *self.value - } - - /// Gets a pointer to the first element of the array. - #[unstable(feature = "maybe_uninit_slice", issue = "53491")] - #[inline(always)] - pub fn first_ptr(this: &[MaybeUninit]) -> *const T { - this as *const [MaybeUninit] as *const T - } - - /// Gets a mutable pointer to the first element of the array. - #[unstable(feature = "maybe_uninit_slice", issue = "53491")] - #[inline(always)] - pub fn first_ptr_mut(this: &mut [MaybeUninit]) -> *mut T { - this as *mut [MaybeUninit] as *mut T - }*/ -} diff -Nru cargo-0.44.1/vendor/maybe-uninit/tests/doesnt_drop.rs cargo-0.47.0/vendor/maybe-uninit/tests/doesnt_drop.rs --- cargo-0.44.1/vendor/maybe-uninit/tests/doesnt_drop.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/maybe-uninit/tests/doesnt_drop.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -extern crate maybe_uninit; -use maybe_uninit::MaybeUninit; - -use std::cell::Cell; - -struct DecrementOnDrop<'a>(&'a Cell); - -impl<'a> DecrementOnDrop<'a> { - pub fn new(ref_:&'a Cell) -> Self { - ref_.set(1); - DecrementOnDrop(ref_) - } -} - -impl<'a> Clone for DecrementOnDrop<'a> { - fn clone(&self) -> Self { - self.0.set(self.0.get() + 1); - - DecrementOnDrop(self.0) - } -} - -impl<'a> Drop for DecrementOnDrop<'a>{ - fn drop(&mut self) { - self.0.set(self.0.get() - 1); - } -} - -#[test] -fn doesnt_drop(){ - let count = Cell::new(0); - let arc = DecrementOnDrop::new(&count); - let maybe = MaybeUninit::new(arc.clone()); - assert_eq!(count.get(), 2); - drop(maybe); - assert_eq!(count.get(), 2); -} diff -Nru cargo-0.44.1/vendor/miniz_oxide/build.rs cargo-0.47.0/vendor/miniz_oxide/build.rs --- cargo-0.44.1/vendor/miniz_oxide/build.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,5 @@ +use autocfg; + +fn main() { + autocfg::new().emit_sysroot_crate("alloc"); +} diff -Nru cargo-0.44.1/vendor/miniz_oxide/.cargo-checksum.json cargo-0.47.0/vendor/miniz_oxide/.cargo-checksum.json --- cargo-0.44.1/vendor/miniz_oxide/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ 
-{"files":{},"package":"aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5"} \ No newline at end of file +{"files":{},"package":"c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/miniz_oxide/Cargo.toml cargo-0.47.0/vendor/miniz_oxide/Cargo.toml --- cargo-0.44.1/vendor/miniz_oxide/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,8 +13,9 @@ [package] edition = "2018" name = "miniz_oxide" -version = "0.3.6" +version = "0.4.2" authors = ["Frommi ", "oyvindln "] +build = "build.rs" exclude = ["benches/*", "tests/*"] description = "DEFLATE compression and decompression library rewritten in Rust based on miniz" homepage = "https://github.com/Frommi/miniz_oxide/tree/master/miniz_oxide" @@ -27,5 +28,26 @@ [lib] name = "miniz_oxide" -[dependencies.adler32] -version = "1.0.4" +[dependencies.adler] +version = "0.2.1" +default-features = false + +[dependencies.alloc] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-alloc" + +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" +[build-dependencies.autocfg] +version = "1.0" + +[features] +no_extern_crate_alloc = [] +rustc-dep-of-std = ["core", "alloc", "compiler_builtins", "adler/rustc-dep-of-std"] diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/deflate/buffer.rs cargo-0.47.0/vendor/miniz_oxide/src/deflate/buffer.rs --- cargo-0.44.1/vendor/miniz_oxide/src/deflate/buffer.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/deflate/buffer.rs 2020-10-01 21:38:28.000000000 +0000 @@ -10,6 +10,17 @@ pub const OUT_BUF_SIZE: usize = (LZ_CODE_BUF_SIZE * 13) / 10; pub const LZ_DICT_FULL_SIZE: usize = LZ_DICT_SIZE + MAX_MATCH_LEN - 1 + 1; +/// Size of hash values in the hash chains. +pub const LZ_HASH_BITS: i32 = 15; +/// How many bits to shift when updating the current hash value. +pub const LZ_HASH_SHIFT: i32 = (LZ_HASH_BITS + 2) / 3; +/// Size of the chained hash tables. +pub const LZ_HASH_SIZE: usize = 1 << LZ_HASH_BITS; + +pub fn update_hash(current_hash: u32, byte: u8) -> u32 { + ((current_hash << LZ_HASH_SHIFT) ^ u32::from(byte)) & (LZ_HASH_SIZE as u32 - 1) +} + pub struct HashBuffers { pub dict: [u8; LZ_DICT_FULL_SIZE], pub next: [u16; LZ_DICT_SIZE], diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/deflate/core.rs cargo-0.47.0/vendor/miniz_oxide/src/deflate/core.rs --- cargo-0.44.1/vendor/miniz_oxide/src/deflate/core.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/deflate/core.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,18 +1,24 @@ //! Streaming compression functionality. -use std::convert::TryInto; -use std::io::{self, Cursor, Seek, SeekFrom, Write}; -use std::{cmp, mem}; +use alloc::boxed::Box; +use core::convert::TryInto; +use core::{cmp, mem}; use super::super::*; use super::deflate_flags::*; use super::CompressionLevel; use crate::deflate::buffer::{ - HashBuffers, LocalBuf, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, OUT_BUF_SIZE, + update_hash, HashBuffers, LocalBuf, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, LZ_HASH_BITS, + LZ_HASH_SHIFT, LZ_HASH_SIZE, OUT_BUF_SIZE, }; use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER, MZ_ADLER32_INIT}; use crate::DataFormat; +// Currently not bubbled up outside this module, so can fill in with more +// context eventually if needed. 
+type Result = core::result::Result; +struct Error {} + const MAX_PROBES_MASK: i32 = 0xFFF; const MAX_SUPPORTED_HUFF_CODESIZE: usize = 32; @@ -250,15 +256,8 @@ } const MAX_HUFF_SYMBOLS: usize = 288; -/// Size of hash values in the hash chains. -const LZ_HASH_BITS: i32 = 15; /// Size of hash chain for fast compression mode. const LEVEL1_HASH_SIZE_MASK: u32 = 4095; -/// How many bits to shift when updating the current hash value. -const LZ_HASH_SHIFT: i32 = (LZ_HASH_BITS + 2) / 3; -/// Size of the chained hash tables. -const LZ_HASH_SIZE: usize = 1 << LZ_HASH_BITS; - /// The number of huffman tables used by the compressor. /// Literal/length, Distances and Length of the huffman codes for the other two tables. const MAX_HUFF_TABLES: usize = 3; @@ -271,9 +270,9 @@ /// Size of the chained hash table. pub(crate) const LZ_DICT_SIZE: usize = 32_768; /// Mask used when stepping through the hash chains. -const LZ_DICT_SIZE_MASK: u32 = LZ_DICT_SIZE as u32 - 1; +const LZ_DICT_SIZE_MASK: usize = (LZ_DICT_SIZE as u32 - 1) as usize; /// The minimum length of a match. -const MIN_MATCH_LEN: u32 = 3; +const MIN_MATCH_LEN: u8 = 3; /// The maximum length of a match. pub(crate) const MAX_MATCH_LEN: usize = 258; @@ -513,7 +512,7 @@ /// Callback function and user used in `compress_to_output`. pub struct CallbackFunc<'a> { - pub put_buf_func: Box bool + 'a>, + pub put_buf_func: &'a mut dyn FnMut(&[u8]) -> bool, } impl<'a> CallbackFunc<'a> { @@ -555,12 +554,12 @@ .copy_from_slice(¶ms.local_buf.b[..n]); params.out_buf_ofs += n; - if saved_output.pos != n as u64 { + if saved_output.pos != n { params.flush_ofs = n as u32; - params.flush_remaining = (saved_output.pos - n as u64) as u32; + params.flush_remaining = (saved_output.pos - n) as u32; } } else { - params.out_buf_ofs += saved_output.pos as usize; + params.out_buf_ofs += saved_output.pos; } params.flush_remaining as i32 @@ -591,9 +590,9 @@ } }; - let cursor = Cursor::new(chosen_buffer); OutputBufferOxide { - inner: cursor, + inner: chosen_buffer, + inner_pos: 0, local: is_local, bit_buffer: 0, bits_in: 0, @@ -655,7 +654,8 @@ } struct OutputBufferOxide<'a> { - pub inner: Cursor<&'a mut [u8]>, + pub inner: &'a mut [u8], + pub inner_pos: usize, pub local: bool, pub bit_buffer: u32, @@ -668,9 +668,8 @@ self.bit_buffer |= bits << self.bits_in; self.bits_in += len; while self.bits_in >= 8 { - let pos = self.inner.position(); - self.inner.get_mut()[pos as usize] = self.bit_buffer as u8; - self.inner.set_position(pos + 1); + self.inner[self.inner_pos] = self.bit_buffer as u8; + self.inner_pos += 1; self.bit_buffer >>= 8; self.bits_in -= 8; } @@ -678,7 +677,7 @@ fn save(&self) -> SavedOutputBufferOxide { SavedOutputBufferOxide { - pos: self.inner.position(), + pos: self.inner_pos, bit_buffer: self.bit_buffer, bits_in: self.bits_in, local: self.local, @@ -686,7 +685,7 @@ } fn load(&mut self, saved: SavedOutputBufferOxide) { - self.inner.set_position(saved.pos); + self.inner_pos = saved.pos; self.bit_buffer = saved.bit_buffer; self.bits_in = saved.bits_in; self.local = saved.local; @@ -701,7 +700,7 @@ } struct SavedOutputBufferOxide { - pub pos: u64, + pub pos: usize, pub bit_buffer: u32, pub bits_in: u32, pub local: bool, @@ -718,17 +717,18 @@ self.bits_in += len; } - fn flush(&mut self, output: &mut OutputBufferOxide) -> io::Result<()> { - let pos = output.inner.position() as usize; + fn flush(&mut self, output: &mut OutputBufferOxide) -> Result<()> { + let pos = output.inner_pos; { // isolation to please borrow checker - let inner = &mut 
(*output.inner.get_mut())[pos..pos + 8]; + let inner = &mut output.inner[pos..pos + 8]; let bytes = u64::to_le_bytes(self.bit_buffer); inner.copy_from_slice(&bytes); } - output - .inner - .seek(SeekFrom::Current(i64::from(self.bits_in >> 3)))?; + match output.inner_pos.checked_add((self.bits_in >> 3) as usize) { + Some(n) if n <= output.inner.len() => output.inner_pos = n, + _ => return Err(Error {}), + } self.bit_buffer >>= self.bits_in & !7; self.bits_in &= 7; Ok(()) @@ -766,19 +766,21 @@ impl RLE { fn prev_code_size( &mut self, - packed_code_sizes: &mut Cursor<&mut [u8]>, + packed_code_sizes: &mut [u8], + packed_pos: &mut usize, h: &mut HuffmanOxide, - ) -> io::Result<()> { + ) -> Result<()> { + let mut write = |buf| write(buf, packed_code_sizes, packed_pos); let counts = &mut h.count[HUFF_CODES_TABLE]; if self.repeat_count != 0 { if self.repeat_count < 3 { counts[self.prev_code_size as usize] = counts[self.prev_code_size as usize].wrapping_add(self.repeat_count as u16); let code = self.prev_code_size; - packed_code_sizes.write_all(&[code, code, code][..self.repeat_count as usize])?; + write(&[code, code, code][..self.repeat_count as usize])?; } else { counts[16] = counts[16].wrapping_add(1); - packed_code_sizes.write_all(&[16, (self.repeat_count - 3) as u8][..])?; + write(&[16, (self.repeat_count - 3) as u8][..])?; } self.repeat_count = 0; } @@ -788,20 +790,22 @@ fn zero_code_size( &mut self, - packed_code_sizes: &mut Cursor<&mut [u8]>, + packed_code_sizes: &mut [u8], + packed_pos: &mut usize, h: &mut HuffmanOxide, - ) -> io::Result<()> { + ) -> Result<()> { + let mut write = |buf| write(buf, packed_code_sizes, packed_pos); let counts = &mut h.count[HUFF_CODES_TABLE]; if self.z_count != 0 { if self.z_count < 3 { counts[0] = counts[0].wrapping_add(self.z_count as u16); - packed_code_sizes.write_all(&[0, 0, 0][..self.z_count as usize])?; + write(&[0, 0, 0][..self.z_count as usize])?; } else if self.z_count <= 10 { counts[17] = counts[17].wrapping_add(1); - packed_code_sizes.write_all(&[17, (self.z_count - 3) as u8][..])?; + write(&[17, (self.z_count - 3) as u8][..])?; } else { counts[18] = counts[18].wrapping_add(1); - packed_code_sizes.write_all(&[18, (self.z_count - 11) as u8][..])?; + write(&[18, (self.z_count - 11) as u8][..])?; } self.z_count = 0; } @@ -810,6 +814,15 @@ } } +fn write(src: &[u8], dst: &mut [u8], dst_pos: &mut usize) -> Result<()> { + match dst.get_mut(*dst_pos..*dst_pos + src.len()) { + Some(s) => s.copy_from_slice(src), + None => return Err(Error {}), + } + *dst_pos += src.len(); + Ok(()) +} + impl Default for HuffmanOxide { fn default() -> Self { HuffmanOxide { @@ -1042,7 +1055,7 @@ output.put_bits(0b01, 2) } - fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> io::Result<()> { + fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> Result<()> { // There will always be one, and only one end of block code. 
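The new `write` helper above is the heart of the `Cursor` removal: with `std::io` unavailable under `no_std`, an output slice plus an explicit position index stands in for `Cursor::write_all`, and running out of room becomes an in-crate `Error` instead of an `io::Error`. A standalone sketch of the same pattern (`TinyError` is an illustrative placeholder, not the module's actual error type):

    struct TinyError;

    fn write(src: &[u8], dst: &mut [u8], dst_pos: &mut usize) -> Result<(), TinyError> {
        match dst.get_mut(*dst_pos..*dst_pos + src.len()) {
            // Copy into the window starting at the current position...
            Some(s) => s.copy_from_slice(src),
            // ...or fail cleanly if the destination is too small.
            None => return Err(TinyError),
        }
        *dst_pos += src.len();
        Ok(())
    }

    fn main() {
        let mut buf = [0u8; 8];
        let mut pos = 0;
        assert!(write(&[17, 3], &mut buf, &mut pos).is_ok());
        assert!(write(&[18, 0], &mut buf, &mut pos).is_ok());
        assert_eq!(&buf[..pos], &[17, 3, 18, 0]);
        // Writing past the end reports an error rather than panicking.
        assert!(write(&[0; 16], &mut buf, &mut pos).is_err());
    }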
self.count[0][256] = 1; @@ -1081,25 +1094,25 @@ memset(&mut self.count[HUFF_CODES_TABLE][..MAX_HUFF_SYMBOLS_2], 0); - let mut packed_code_sizes_cursor = Cursor::new(&mut packed_code_sizes[..]); + let mut packed_pos = 0; for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] { if code_size == 0 { - rle.prev_code_size(&mut packed_code_sizes_cursor, self)?; + rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; rle.z_count += 1; if rle.z_count == 138 { - rle.zero_code_size(&mut packed_code_sizes_cursor, self)?; + rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; } } else { - rle.zero_code_size(&mut packed_code_sizes_cursor, self)?; + rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; if code_size != rle.prev_code_size { - rle.prev_code_size(&mut packed_code_sizes_cursor, self)?; + rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; self.count[HUFF_CODES_TABLE][code_size as usize] = self.count[HUFF_CODES_TABLE][code_size as usize].wrapping_add(1); - packed_code_sizes_cursor.write_all(&[code_size][..])?; + write(&[code_size], &mut packed_code_sizes, &mut packed_pos)?; } else { rle.repeat_count += 1; if rle.repeat_count == 6 { - rle.prev_code_size(&mut packed_code_sizes_cursor, self)?; + rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; } } } @@ -1107,9 +1120,9 @@ } if rle.repeat_count != 0 { - rle.prev_code_size(&mut packed_code_sizes_cursor, self)?; + rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; } else { - rle.zero_code_size(&mut packed_code_sizes_cursor, self)?; + rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; } self.optimize_table(2, MAX_HUFF_SYMBOLS_2, 7, false); @@ -1136,8 +1149,7 @@ } let mut packed_code_size_index = 0 as usize; - let packed_code_sizes = packed_code_sizes_cursor.get_ref(); - while packed_code_size_index < packed_code_sizes_cursor.position() as usize { + while packed_code_size_index < packed_pos { let code = packed_code_sizes[packed_code_size_index] as usize; packed_code_size_index += 1; assert!(code < MAX_HUFF_SYMBOLS_2); @@ -1166,10 +1178,10 @@ /// Padded with 1 byte to simplify matching code in `compress_fast`. pub b: Box, - pub code_buf_dict_pos: u32, - pub lookahead_size: u32, - pub lookahead_pos: u32, - pub size: u32, + pub code_buf_dict_pos: usize, + pub lookahead_size: usize, + pub lookahead_pos: usize, + pub size: usize, } fn probes_from_flags(flags: u32) -> [u32; 2] { @@ -1206,7 +1218,7 @@ /// Do an unaligned read of the data at `pos` in the dictionary and treat it as if it was of /// type T. #[inline] - fn read_unaligned_u32(&self, pos: u32) -> u32 { + fn read_unaligned_u32(&self, pos: usize) -> u32 { // Masking the value here helps avoid bounds checks. let pos = (pos & LZ_DICT_SIZE_MASK) as usize; let end = pos + 4; @@ -1220,7 +1232,7 @@ /// Do an unaligned read of the data at `pos` in the dictionary and treat it as if it was of /// type T. #[inline] - fn read_unaligned_u64(&self, pos: u32) -> u64 { + fn read_unaligned_u64(&self, pos: usize) -> u64 { let pos = pos as usize; let bytes: [u8; 8] = self.b.dict[pos..pos + 8].try_into().unwrap(); u64::from_le_bytes(bytes) @@ -1239,8 +1251,8 @@ /// values if no better matches were found. 
fn find_match( &self, - lookahead_pos: u32, - max_dist: u32, + lookahead_pos: usize, + max_dist: usize, max_match_len: u32, mut match_dist: u32, mut match_len: u32, @@ -1253,7 +1265,7 @@ let max_match_len = cmp::min(MAX_MATCH_LEN as u32, max_match_len); match_len = cmp::max(match_len, 1); - let pos = lookahead_pos & LZ_DICT_SIZE_MASK; + let pos = lookahead_pos as usize & LZ_DICT_SIZE_MASK; let mut probe_pos = pos; // Number of probes into the hash chains. let mut num_probes_left = self.max_probes[(match_len >= 32) as usize]; @@ -1279,7 +1291,7 @@ } for _ in 0..3 { - let next_probe_pos = u32::from(self.b.next[probe_pos as usize]); + let next_probe_pos = self.b.next[probe_pos as usize] as usize; dist = (lookahead_pos - next_probe_pos) & 0xFFFF; if next_probe_pos == 0 || dist > max_dist { @@ -1293,7 +1305,7 @@ // position to match against. probe_pos = next_probe_pos & LZ_DICT_SIZE_MASK; - if self.read_as_u16((probe_pos + match_len - 1) as usize) == c01 { + if self.read_as_u16((probe_pos + match_len as usize - 1) as usize) == c01 { break 'found; } } @@ -1325,10 +1337,10 @@ // If not all of the last 8 bytes matched, check how may of them did. let trailing = xor_data.trailing_zeros(); - let probe_len = p - pos + (trailing >> 3); - if probe_len > match_len { - match_dist = dist; - match_len = cmp::min(max_match_len, probe_len); + let probe_len = p - pos + (trailing as usize >> 3); + if probe_len > match_len as usize { + match_dist = dist as u32; + match_len = cmp::min(max_match_len, probe_len as u32); if match_len == max_match_len { // We found a match that had the maximum allowed length, // so there is now point searching further. @@ -1336,13 +1348,13 @@ } // We found a better match, so save the last two bytes for further match // comparisons. - c01 = self.read_as_u16((pos + match_len - 1) as usize) + c01 = self.read_as_u16(pos + match_len as usize - 1) } continue 'outer; } } - return (dist, cmp::min(max_match_len, MAX_MATCH_LEN as u32)); + return (dist as u32, cmp::min(max_match_len, MAX_MATCH_LEN as u32)); } } } @@ -1427,6 +1439,8 @@ pub code_position: usize, pub flag_position: usize, + // The total number of bytes in the current block. + // (Could maybe use usize, but it's not possible to exceed a block size of ) pub total_bytes: u32, pub num_flags_left: u32, } @@ -1478,7 +1492,7 @@ huff: &HuffmanOxide, output: &mut OutputBufferOxide, lz_code_buf: &[u8], -) -> io::Result { +) -> Result { let mut flags = 1; let mut bb = BitBuffer { bit_buffer: u64::from(output.bit_buffer), @@ -1577,7 +1591,7 @@ output: &mut OutputBufferOxide, lz: &LZOxide, static_block: bool, -) -> io::Result { +) -> Result { if static_block { huff.start_static_block(output); } else { @@ -1591,7 +1605,7 @@ d: &mut CompressorOxide, callback: &mut CallbackOxide, flush: TDEFLFlush, -) -> io::Result { +) -> Result { let mut saved_buffer; { let mut output = callback @@ -1639,7 +1653,7 @@ // (as literals are either 8 or 9 bytes), a raw block will // never take up less space if the number of input bytes are less than 32. let expanded = (d.lz.total_bytes > 32) - && (output.inner.position() - saved_buffer.pos + 1 >= u64::from(d.lz.total_bytes)) + && (output.inner_pos - saved_buffer.pos + 1 >= (d.lz.total_bytes as usize)) && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos <= d.dict.size); if use_raw_block || expanded { @@ -1657,7 +1671,7 @@ // Write the actual bytes. 
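`find_match` above compares candidate matches eight bytes at a time: it XORs two little-endian `u64` reads and converts the first differing bit into a count of matching bytes with `trailing_zeros() >> 3`. A self-contained sketch of that step:

```rust
// XOR two little-endian u64 reads and turn the first differing bit into a
// count of matching leading bytes, as in the probe loop above.
fn matching_prefix_len(a: &[u8; 8], b: &[u8; 8]) -> u32 {
    let xor = u64::from_le_bytes(*a) ^ u64::from_le_bytes(*b);
    if xor == 0 {
        8
    } else {
        // Each byte is 8 bits, so divide the first differing bit position by
        // 8 (the `trailing >> 3` in the code above).
        xor.trailing_zeros() >> 3
    }
}

fn main() {
    assert_eq!(matching_prefix_len(b"abcdefgh", b"abcdefgh"), 8);
    assert_eq!(matching_prefix_len(b"abcdefgh", b"abcXefgh"), 3);
}
```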
for i in 0..d.lz.total_bytes { - let pos = (d.dict.code_buf_dict_pos + i) & LZ_DICT_SIZE_MASK; + let pos = (d.dict.code_buf_dict_pos + i as usize) & LZ_DICT_SIZE_MASK; output.put_bits(u32::from(d.dict.b.dict[pos as usize]), 8); } } else if !comp_success { @@ -1691,7 +1705,7 @@ d.lz.code_position = 1; d.lz.flag_position = 0; d.lz.num_flags_left = 8; - d.dict.code_buf_dict_pos += d.lz.total_bytes; + d.dict.code_buf_dict_pos += d.lz.total_bytes as usize; d.lz.total_bytes = 0; d.params.block_index += 1; @@ -1715,13 +1729,13 @@ } fn record_match(h: &mut HuffmanOxide, lz: &mut LZOxide, mut match_len: u32, mut match_dist: u32) { - assert!(match_len >= MIN_MATCH_LEN); + assert!(match_len >= MIN_MATCH_LEN.into()); assert!(match_dist >= 1); assert!(match_dist as usize <= LZ_DICT_SIZE); lz.total_bytes += match_len; match_dist -= 1; - match_len -= MIN_MATCH_LEN; + match_len -= u32::from(MIN_MATCH_LEN); lz.write_code(match_len as u8); lz.write_code(match_dist as u8); lz.write_code((match_dist >> 8) as u8); @@ -1756,25 +1770,32 @@ let src_buf_left = in_buf.len() - src_pos; let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size as usize); - if lookahead_size + d.dict.size >= MIN_MATCH_LEN - 1 && num_bytes_to_process > 0 { + if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1 + && num_bytes_to_process > 0 + { let dictb = &mut d.dict.b; - let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK; - let mut ins_pos = lookahead_pos + lookahead_size - 2; - let mut hash = (u32::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize]) - << LZ_HASH_SHIFT) - ^ u32::from(dictb.dict[((ins_pos + 1) & LZ_DICT_SIZE_MASK) as usize]); + let mut dst_pos = (lookahead_pos + lookahead_size as usize) & LZ_DICT_SIZE_MASK; + let mut ins_pos = lookahead_pos + lookahead_size as usize - 2; + // Start the hash value from the first two bytes + let mut hash = update_hash( + u32::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize]), + dictb.dict[((ins_pos + 1) & LZ_DICT_SIZE_MASK) as usize], + ); + + lookahead_size += num_bytes_to_process; - lookahead_size += num_bytes_to_process as u32; for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] { + // Add byte to input buffer. dictb.dict[dst_pos as usize] = c; if (dst_pos as usize) < MAX_MATCH_LEN - 1 { dictb.dict[LZ_DICT_SIZE + dst_pos as usize] = c; } - hash = ((hash << LZ_HASH_SHIFT) ^ u32::from(c)) & (LZ_HASH_SIZE as u32 - 1); + // Generate hash from the current byte, + hash = update_hash(hash, c); dictb.next[(ins_pos & LZ_DICT_SIZE_MASK) as usize] = dictb.hash[hash as usize]; - + // and insert it into the hash chain. 
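The inline hash expression is replaced above by an `update_hash` helper whose definition falls outside this hunk; judging from the code it replaces, it presumably reduces to the shift/xor/mask below. The constants here are illustrative stand-ins, not necessarily the crate's exact values:

```rust
// Hypothetical stand-in for `update_hash`: roll one byte into the running
// hash and mask the result to the hash-table size.
const LZ_HASH_BITS: u32 = 15;
const LZ_HASH_SHIFT: u32 = (LZ_HASH_BITS + 2) / 3;
const LZ_HASH_SIZE: u32 = 1 << LZ_HASH_BITS;

fn update_hash(hash: u32, byte: u8) -> u32 {
    ((hash << LZ_HASH_SHIFT) ^ u32::from(byte)) & (LZ_HASH_SIZE - 1)
}

fn main() {
    // Seed the hash from the first two bytes, then roll in each new byte,
    // mirroring the two call sites in the hunk above.
    let data = b"abcde";
    let mut hash = update_hash(u32::from(data[0]), data[1]);
    for &c in &data[2..] {
        hash = update_hash(hash, c);
        assert!(hash < LZ_HASH_SIZE);
    }
}
```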
dictb.hash[hash as usize] = ins_pos as u16; dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK; ins_pos += 1; @@ -1790,7 +1811,7 @@ } lookahead_size += 1; - if lookahead_size + d.dict.size >= MIN_MATCH_LEN { + if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() { let ins_pos = lookahead_pos + lookahead_size - 3; let hash = ((u32::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize]) << (LZ_HASH_SHIFT * 2)) @@ -1807,7 +1828,7 @@ src_pos += num_bytes_to_process; } - d.dict.size = cmp::min(LZ_DICT_SIZE as u32 - lookahead_size, d.dict.size); + d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size); if d.params.flush == TDEFLFlush::None && (lookahead_size as usize) < MAX_MATCH_LEN { break; } @@ -1817,27 +1838,29 @@ let mut cur_match_len = if saved_match_len != 0 { saved_match_len } else { - MIN_MATCH_LEN - 1 + u32::from(MIN_MATCH_LEN) - 1 }; let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK; if d.params.flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0 { + // If TDEFL_RLE_MATCHES is set, we only look for repeating sequences of the current byte. if d.dict.size != 0 && d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 { let c = d.dict.b.dict[((cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK) as usize]; cur_match_len = d.dict.b.dict[cur_pos as usize..(cur_pos + lookahead_size) as usize] .iter() .take_while(|&x| *x == c) .count() as u32; - if cur_match_len < MIN_MATCH_LEN { + if cur_match_len < MIN_MATCH_LEN.into() { cur_match_len = 0 } else { cur_match_dist = 1 } } } else { + // Try to find a match for the bytes at the current position. let dist_len = d.dict.find_match( lookahead_pos, d.dict.size, - lookahead_size, + lookahead_size as u32, cur_match_dist, cur_match_len, ); @@ -1845,9 +1868,9 @@ cur_match_len = dist_len.1; } - let far_and_small = cur_match_len == MIN_MATCH_LEN && cur_match_dist >= 8 * 1024; + let far_and_small = cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024; let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5; - if far_and_small || filter_small || cur_pos == cur_match_dist { + if far_and_small || filter_small || cur_pos == cur_match_dist as usize { cur_match_dist = 0; cur_match_len = 0; } @@ -1858,7 +1881,7 @@ if cur_match_len >= 128 { record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist); saved_match_len = 0; - len_to_move = cur_match_len; + len_to_move = cur_match_len as usize; } else { saved_lit = d.dict.b.dict[cur_pos as usize]; saved_match_dist = cur_match_dist; @@ -1866,7 +1889,7 @@ } } else { record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist); - len_to_move = saved_match_len - 1; + len_to_move = (saved_match_len - 1) as usize; saved_match_len = 0; } } else if cur_match_dist == 0 { @@ -1882,7 +1905,7 @@ // If we are using lazy matching, check for matches at the next byte if the current // match was shorter than 128 bytes. 
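When `TDEFL_RLE_MATCHES` is set, the code above skips the hash chains and only counts how many upcoming bytes repeat the byte just before the current position, using `take_while`. A simplified sketch that ignores the circular wrap-around the real dictionary performs:

```rust
// RLE-only match search: count repeats of the previous byte in the lookahead.
fn rle_match_len(dict: &[u8], cur_pos: usize, lookahead: usize) -> u32 {
    if cur_pos == 0 {
        return 0;
    }
    let prev = dict[cur_pos - 1];
    dict[cur_pos..cur_pos + lookahead]
        .iter()
        .take_while(|&&b| b == prev)
        .count() as u32
}

fn main() {
    let dict = b"aaaaabcd";
    // Position 1 follows an 'a'; the next four bytes are also 'a'.
    assert_eq!(rle_match_len(dict, 1, 7), 4);
    // Position 5 follows an 'a', but the next byte is 'b'.
    assert_eq!(rle_match_len(dict, 5, 3), 0);
}
```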
record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist); - len_to_move = cur_match_len; + len_to_move = cur_match_len as usize; } else { saved_lit = d.dict.b.dict[cmp::min(cur_pos as usize, d.dict.b.dict.len() - 1)]; saved_match_dist = cur_match_dist; @@ -1892,7 +1915,7 @@ lookahead_pos += len_to_move; assert!(lookahead_size >= len_to_move); lookahead_size -= len_to_move; - d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE as u32); + d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE); let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8; let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0; @@ -1925,7 +1948,7 @@ true } -const COMP_FAST_LOOKAHEAD_SIZE: u32 = 4096; +const COMP_FAST_LOOKAHEAD_SIZE: usize = 4096; fn compress_fast(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool { let mut src_pos = d.params.src_pos; @@ -1946,7 +1969,7 @@ in_buf.len() - src_pos, (COMP_FAST_LOOKAHEAD_SIZE - lookahead_size) as usize, ); - lookahead_size += num_bytes_to_process as u32; + lookahead_size += num_bytes_to_process; while num_bytes_to_process != 0 { let n = cmp::min(LZ_DICT_SIZE - dst_pos, num_bytes_to_process); @@ -1963,7 +1986,7 @@ num_bytes_to_process -= n; } - d.dict.size = cmp::min(LZ_DICT_SIZE as u32 - lookahead_size, d.dict.size); + d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size); if d.params.flush == TDEFLFlush::None && lookahead_size < COMP_FAST_LOOKAHEAD_SIZE { break; } @@ -1976,11 +1999,11 @@ let hash = (first_trigram ^ (first_trigram >> (24 - (LZ_HASH_BITS - 8)))) & LEVEL1_HASH_SIZE_MASK; - let mut probe_pos = u32::from(d.dict.b.hash[hash as usize]); + let mut probe_pos = usize::from(d.dict.b.hash[hash as usize]); d.dict.b.hash[hash as usize] = lookahead_pos as u16; - let mut cur_match_dist = (lookahead_pos - probe_pos) as u16; - if u32::from(cur_match_dist) <= d.dict.size { + let mut cur_match_dist = (lookahead_pos - probe_pos as usize) as u16; + if cur_match_dist as usize <= d.dict.size { probe_pos &= LZ_DICT_SIZE_MASK; let trigram = d.dict.read_unaligned_u32(probe_pos) & 0xFF_FFFF; @@ -1999,7 +2022,7 @@ q += 8; } else { let trailing = xor_data.trailing_zeros(); - break 'find_match p as u32 - cur_pos + (trailing >> 3); + break 'find_match p as u32 - cur_pos as u32 + (trailing >> 3); } } @@ -2010,8 +2033,8 @@ }; }; - if cur_match_len < MIN_MATCH_LEN - || (cur_match_len == MIN_MATCH_LEN && cur_match_dist >= 8 * 1024) + if cur_match_len < MIN_MATCH_LEN.into() + || (cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024) { let lit = first_trigram as u8; cur_match_len = 1; @@ -2021,13 +2044,13 @@ } else { // Limit the match to the length of the lookahead so we don't create a match // that ends after the end of the input data. 
- cur_match_len = cmp::min(cur_match_len, lookahead_size); - debug_assert!(cur_match_len >= MIN_MATCH_LEN); + cur_match_len = cmp::min(cur_match_len, lookahead_size as u32); + debug_assert!(cur_match_len >= MIN_MATCH_LEN.into()); debug_assert!(cur_match_dist >= 1); debug_assert!(cur_match_dist as usize <= LZ_DICT_SIZE); cur_match_dist -= 1; - d.lz.write_code((cur_match_len - MIN_MATCH_LEN) as u8); + d.lz.write_code((cur_match_len - u32::from(MIN_MATCH_LEN)) as u8); d.lz.write_code(cur_match_dist as u8); d.lz.write_code((cur_match_dist >> 8) as u8); @@ -2040,8 +2063,8 @@ [LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1; } - d.huff.count[0] - [LEN_SYM[(cur_match_len - MIN_MATCH_LEN) as usize] as usize] += 1; + d.huff.count[0][LEN_SYM[(cur_match_len - u32::from(MIN_MATCH_LEN)) as usize] + as usize] += 1; } } else { d.lz.write_code(first_trigram as u8); @@ -2051,10 +2074,10 @@ d.lz.consume_flag(); d.lz.total_bytes += cur_match_len; - lookahead_pos += cur_match_len; - d.dict.size = cmp::min(d.dict.size + cur_match_len, LZ_DICT_SIZE as u32); - cur_pos = (cur_pos + cur_match_len) & LZ_DICT_SIZE_MASK; - lookahead_size -= cur_match_len; + lookahead_pos += cur_match_len as usize; + d.dict.size = cmp::min(d.dict.size + cur_match_len as usize, LZ_DICT_SIZE); + cur_pos = (cur_pos + cur_match_len as usize) & LZ_DICT_SIZE_MASK; + lookahead_size -= cur_match_len as usize; if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 { // These values are used in flush_block, so we need to write them back here. @@ -2090,7 +2113,7 @@ d.huff.count[0][lit as usize] += 1; lookahead_pos += 1; - d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE as u32); + d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & LZ_DICT_SIZE_MASK; lookahead_size -= 1; @@ -2179,14 +2202,14 @@ d: &mut CompressorOxide, in_buf: &[u8], flush: TDEFLFlush, - callback_func: impl FnMut(&[u8]) -> bool, + mut callback_func: impl FnMut(&[u8]) -> bool, ) -> (TDEFLStatus, usize) { let res = compress_inner( d, &mut CallbackOxide::new_callback_func( in_buf, CallbackFunc { - put_buf_func: Box::new(callback_func), + put_buf_func: &mut callback_func, }, ), flush, @@ -2334,6 +2357,8 @@ MZ_DEFAULT_WINDOW_BITS, }; use crate::inflate::decompress_to_vec; + use std::prelude::v1::*; + use std::vec; #[test] fn u16_to_slice() { diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/deflate/mod.rs cargo-0.47.0/vendor/miniz_oxide/src/deflate/mod.rs --- cargo-0.44.1/vendor/miniz_oxide/src/deflate/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/deflate/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,5 +1,8 @@ //! This module contains functionality for compression. +use crate::alloc::vec; +use crate::alloc::vec::Vec; + mod buffer; pub mod core; pub mod stream; @@ -119,7 +122,7 @@ // The comp flags function sets the zlib flag if the window_bits parameter is > 0. let flags = create_comp_flags_from_zip_params(level.into(), window_bits, strategy); let mut compressor = CompressorOxide::new(flags); - let mut output = vec![0; input.len() / 2]; + let mut output = vec![0; ::core::cmp::max(input.len() / 2, 2)]; let mut in_pos = 0; let mut out_pos = 0; @@ -157,6 +160,7 @@ mod test { use super::{compress_to_vec, compress_to_vec_inner, CompressionStrategy}; use crate::inflate::decompress_to_vec; + use std::vec; /// Test deflate example. 
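In `deflate/mod.rs` above, the initial output buffer becomes `max(input.len() / 2, 2)` bytes, so compressing a zero- or one-byte input no longer starts from an empty vector. A round trip through the public API exercised by the adjacent tests; compression level 6 is an arbitrary mid-range choice:

```rust
// Compress and decompress with the high-level helpers from this crate.
use miniz_oxide::deflate::compress_to_vec;
use miniz_oxide::inflate::decompress_to_vec;

fn main() {
    let data = b"Hello, miniz_oxide! Hello, miniz_oxide!";
    let compressed = compress_to_vec(data, 6);
    let decompressed = decompress_to_vec(&compressed).expect("decompression failed");
    assert_eq!(decompressed.as_slice(), &data[..]);
}
```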
/// diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/deflate/stream.rs cargo-0.47.0/vendor/miniz_oxide/src/deflate/stream.rs --- cargo-0.44.1/vendor/miniz_oxide/src/deflate/stream.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/deflate/stream.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ //! As of now this is mainly inteded for use to build a higher-level wrapper. //! //! There is no DeflateState as the needed state is contained in the compressor struct itself. -use std::convert::{AsMut, AsRef}; +use core::convert::{AsMut, AsRef}; use crate::deflate::core::{compress, CompressorOxide, TDEFLFlush, TDEFLStatus}; use crate::{MZError, MZFlush, MZStatus, StreamResult}; @@ -100,6 +100,9 @@ use crate::deflate::CompressorOxide; use crate::inflate::decompress_to_vec_zlib; use crate::{MZFlush, MZStatus}; + use std::prelude::v1::*; + use std::vec; + #[test] fn test_state() { let data = b"Hello zlib!"; diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/inflate/core.rs cargo-0.47.0/vendor/miniz_oxide/src/inflate/core.rs --- cargo-0.44.1/vendor/miniz_oxide/src/inflate/core.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/inflate/core.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,8 +3,8 @@ use super::*; use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER}; -use std::convert::TryInto; -use std::{cmp, slice}; +use ::core::convert::TryInto; +use ::core::{cmp, slice}; use self::output_buffer::OutputBuffer; @@ -1020,39 +1020,24 @@ pub fn decompress( r: &mut DecompressorOxide, in_buf: &[u8], - out_cur: &mut Cursor<&mut [u8]>, - flags: u32, -) -> (TINFLStatus, usize, usize) { - let res = decompress_inner(r, in_buf, out_cur, flags); - let new_pos = out_cur.position() + res.2 as u64; - out_cur.set_position(new_pos); - res -} - -#[inline] -fn decompress_inner( - r: &mut DecompressorOxide, - in_buf: &[u8], - out_cur: &mut Cursor<&mut [u8]>, + out: &mut [u8], + out_pos: usize, flags: u32, ) -> (TINFLStatus, usize, usize) { - let out_buf_start_pos = out_cur.position() as usize; let out_buf_size_mask = if flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0 { usize::max_value() } else { // In the case of zero len, any attempt to write would produce HasMoreOutput, // so to gracefully process the case of there really being no output, // set the mask to all zeros. - out_cur.get_ref().len().saturating_sub(1) + out.len().saturating_sub(1) }; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). // Also make sure that the output buffer position is not past the end of the output buffer. - if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 - || out_cur.position() > out_cur.get_ref().len() as u64 - { + if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 || out_pos > out.len() { return (TINFLStatus::BadParam, 0, 0); } @@ -1060,7 +1045,7 @@ let mut state = r.state; - let mut out_buf = OutputBuffer::from_slice_and_pos(out_cur.get_mut(), out_buf_start_pos); + let mut out_buf = OutputBuffer::from_slice_and_pos(out, out_pos); // Make a local copy of the important variables here so we can work with them on the stack. 
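`inflate::core::decompress` above now takes the output as a plain slice plus a starting offset and returns `(status, bytes_consumed, bytes_written)`, replacing the old `Cursor<&mut [u8]>` parameter. A sketch of a direct call, reusing the zlib-wrapped "Hello, zlib!" bytes that appear in the tests later in this diff:

```rust
use miniz_oxide::inflate::core::{decompress, inflate_flags, DecompressorOxide};
use miniz_oxide::inflate::TINFLStatus;

fn main() {
    // zlib-wrapped "Hello, zlib!", as used by the tests in this file.
    let encoded = [
        120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
    ];
    let flags = inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
        | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    let mut r = DecompressorOxide::new();
    let mut out = vec![0u8; 64];
    // The output slice and starting offset replace the old Cursor argument.
    let (status, _bytes_consumed, bytes_written) = decompress(&mut r, &encoded, &mut out, 0, flags);
    assert_eq!(status, TINFLStatus::Done);
    assert_eq!(&out[..bytes_written], &b"Hello, zlib!"[..]);
}
```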
let mut l = LocalVars { @@ -1634,10 +1619,7 @@ let need_adler = flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32) != 0; if need_adler && status as i32 >= 0 { let out_buf_pos = out_buf.position(); - r.check_adler32 = update_adler32( - r.check_adler32, - &out_buf.get_ref()[out_buf_start_pos..out_buf_pos], - ); + r.check_adler32 = update_adler32(r.check_adler32, &out_buf.get_ref()[out_pos..out_buf_pos]); // disabled so that random input from fuzzer would not be rejected early, // before it has a chance to reach interesting parts of code @@ -1655,7 +1637,7 @@ ( status, in_buf.len() - in_iter.len() - in_undo, - out_buf.position() - out_buf_start_pos, + out_buf.position() - out_pos, ) } @@ -1671,8 +1653,7 @@ output_buffer: &mut [u8], flags: u32, ) -> (TINFLStatus, &'i [u8], usize) { - let (status, in_pos, out_pos) = - decompress(r, input_buffer, &mut Cursor::new(output_buffer), flags); + let (status, in_pos, out_pos) = decompress(r, input_buffer, output_buffer, 0, flags); (status, &input_buffer[in_pos..], out_pos) } @@ -1772,14 +1753,14 @@ fn check_result(input: &[u8], expected_status: TINFLStatus, expected_state: State, zlib: bool) { let mut r = DecompressorOxide::default(); let mut output_buf = vec![0; 1024 * 32]; - let mut out_cursor = Cursor::new(output_buf.as_mut_slice()); let flags = if zlib { inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER } else { 0 } | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | TINFL_FLAG_HAS_MORE_INPUT; - let (d_status, _in_bytes, _out_bytes) = decompress(&mut r, input, &mut out_cursor, flags); + let (d_status, _in_bytes, _out_bytes) = + decompress(&mut r, input, &mut output_buf, 0, flags); assert_eq!(expected_status, d_status); assert_eq!(expected_state, r.state); } @@ -1870,10 +1851,9 @@ | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; let mut r = DecompressorOxide::new(); let mut output_buf = vec![]; - let mut out_cursor = Cursor::new(output_buf.as_mut_slice()); // Check that we handle an empty buffer properly and not panicking. // https://github.com/Frommi/miniz_oxide/issues/23 - let res = decompress(&mut r, &encoded, &mut out_cursor, flags); + let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags); assert_eq!(res, (TINFLStatus::HasMoreOutput, 4, 0)); } @@ -1885,10 +1865,9 @@ let flags = TINFL_FLAG_COMPUTE_ADLER32; let mut r = DecompressorOxide::new(); let mut output_buf = vec![]; - let mut out_cursor = Cursor::new(output_buf.as_mut_slice()); // Check that we handle an empty buffer properly and not panicking. // https://github.com/Frommi/miniz_oxide/issues/23 - let res = decompress(&mut r, &encoded, &mut out_cursor, flags); + let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags); assert_eq!(res, (TINFLStatus::HasMoreOutput, 2, 0)); } } diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/inflate/mod.rs cargo-0.47.0/vendor/miniz_oxide/src/inflate/mod.rs --- cargo-0.44.1/vendor/miniz_oxide/src/inflate/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/inflate/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,10 @@ //! This module contains functionality for decompression. -use std::io::Cursor; -use std::usize; +use ::core::cmp::min; +use ::core::usize; +use crate::alloc::boxed::Box; +use crate::alloc::vec; +use crate::alloc::vec::Vec; pub mod core; mod output_buffer; @@ -59,7 +62,7 @@ /// Returns a status and an integer representing where the decompressor failed on failure. 
#[inline] pub fn decompress_to_vec(input: &[u8]) -> Result, TINFLStatus> { - decompress_to_vec_inner(input, 0) + decompress_to_vec_inner(input, 0, usize::max_value()) } /// Decompress the deflate-encoded data (with a zlib wrapper) in `input` to a vector. @@ -67,25 +70,53 @@ /// Returns a status and an integer representing where the decompressor failed on failure. #[inline] pub fn decompress_to_vec_zlib(input: &[u8]) -> Result, TINFLStatus> { - decompress_to_vec_inner(input, inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER) + decompress_to_vec_inner( + input, + inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER, + usize::max_value(), + ) } -fn decompress_to_vec_inner(input: &[u8], flags: u32) -> Result, TINFLStatus> { +/// Decompress the deflate-encoded data in `input` to a vector. +/// The vector is grown to at most `max_size` bytes; if the data does not fit in that size, +/// `TINFLStatus::HasMoreOutput` error is returned. +/// +/// Returns a status and an integer representing where the decompressor failed on failure. +#[inline] +pub fn decompress_to_vec_with_limit(input: &[u8], max_size: usize) -> Result, TINFLStatus> { + decompress_to_vec_inner(input, 0, max_size) +} + +/// Decompress the deflate-encoded data (with a zlib wrapper) in `input` to a vector. +/// The vector is grown to at most `max_size` bytes; if the data does not fit in that size, +/// `TINFLStatus::HasMoreOutput` error is returned. +/// +/// Returns a status and an integer representing where the decompressor failed on failure. +#[inline] +pub fn decompress_to_vec_zlib_with_limit( + input: &[u8], + max_size: usize, +) -> Result, TINFLStatus> { + decompress_to_vec_inner(input, inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER, max_size) +} + +fn decompress_to_vec_inner( + input: &[u8], + flags: u32, + max_output_size: usize, +) -> Result, TINFLStatus> { let flags = flags | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - let mut ret: Vec = vec![0; input.len() * 2]; + let mut ret: Vec = vec![0; min(input.len().saturating_mul(2), max_output_size)]; let mut decomp = Box::::default(); let mut in_pos = 0; let mut out_pos = 0; loop { - let (status, in_consumed, out_consumed) = { - // Wrap the whole output slice so we know we have enough of the - // decompressed data for matches. - let mut c = Cursor::new(ret.as_mut_slice()); - c.set_position(out_pos as u64); - decompress(&mut decomp, &input[in_pos..], &mut c, flags) - }; + // Wrap the whole output slice so we know we have enough of the + // decompressed data for matches. + let (status, in_consumed, out_consumed) = + decompress(&mut decomp, &input[in_pos..], &mut ret, out_pos, flags); in_pos += in_consumed; out_pos += out_consumed; @@ -96,8 +127,15 @@ } TINFLStatus::HasMoreOutput => { - // We need more space so resize the buffer. - ret.resize(ret.len() + out_pos, 0); + // We need more space, so check if we can resize the buffer and do it. 
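A usage sketch for the new size-limited entry points added above, mirroring the `decompress_vec_with_high_limit` and `fail_to_decompress_with_limit` tests that follow: a limit smaller than the decompressed size surfaces as `TINFLStatus::HasMoreOutput` instead of an unbounded allocation.

```rust
use miniz_oxide::inflate::{decompress_to_vec_zlib_with_limit, TINFLStatus};

fn main() {
    let encoded = [
        120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
    ];
    // "Hello, zlib!" is 12 bytes, so a 100-byte limit succeeds...
    let ok = decompress_to_vec_zlib_with_limit(&encoded, 100).unwrap();
    assert_eq!(ok.as_slice(), &b"Hello, zlib!"[..]);
    // ...and an 8-byte limit is rejected.
    assert_eq!(
        decompress_to_vec_zlib_with_limit(&encoded, 8),
        Err(TINFLStatus::HasMoreOutput)
    );
}
```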
+ let new_len = ret + .len() + .checked_add(out_pos) + .ok_or(TINFLStatus::HasMoreOutput)?; + if new_len > max_output_size { + return Err(TINFLStatus::HasMoreOutput); + }; + ret.resize(new_len, 0); } _ => return Err(status), @@ -107,14 +145,30 @@ #[cfg(test)] mod test { - use super::decompress_to_vec_zlib; + use super::TINFLStatus; + use super::{decompress_to_vec_zlib, decompress_to_vec_zlib_with_limit}; + const encoded: [u8; 20] = [ + 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19, + ]; #[test] fn decompress_vec() { - let encoded = [ - 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19, - ]; let res = decompress_to_vec_zlib(&encoded[..]).unwrap(); assert_eq!(res.as_slice(), &b"Hello, zlib!"[..]); } + + #[test] + fn decompress_vec_with_high_limit() { + let res = decompress_to_vec_zlib_with_limit(&encoded[..], 100_000).unwrap(); + assert_eq!(res.as_slice(), &b"Hello, zlib!"[..]); + } + + #[test] + fn fail_to_decompress_with_limit() { + let res = decompress_to_vec_zlib_with_limit(&encoded[..], 8); + match res { + Err(TINFLStatus::HasMoreOutput) => (), // expected result + _ => panic!("Decompression output size limit was not enforced"), + } + } } diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/inflate/stream.rs cargo-0.47.0/vendor/miniz_oxide/src/inflate/stream.rs --- cargo-0.44.1/vendor/miniz_oxide/src/inflate/stream.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/inflate/stream.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,13 +1,59 @@ //! Extra streaming decompression functionality. //! //! As of now this is mainly inteded for use to build a higher-level wrapper. -use std::io::Cursor; -use std::{cmp, mem}; +use crate::alloc::boxed::Box; +use core::{cmp, mem}; use crate::inflate::core::{decompress, inflate_flags, DecompressorOxide, TINFL_LZ_DICT_SIZE}; use crate::inflate::TINFLStatus; use crate::{DataFormat, MZError, MZFlush, MZResult, MZStatus, StreamResult}; +/// Tag that determines reset policy of [InflateState](struct.InflateState.html) +pub trait ResetPolicy { + /// Performs reset + fn reset(&self, state: &mut InflateState); +} + +/// Resets state, without performing expensive ops (e.g. zeroing buffer) +/// +/// Note that not zeroing buffer can lead to security issues when dealing with untrusted input. +pub struct MinReset; + +impl ResetPolicy for MinReset { + fn reset(&self, state: &mut InflateState) { + state.decompressor().init(); + state.dict_ofs = 0; + state.dict_avail = 0; + state.first_call = true; + state.has_flushed = false; + state.last_status = TINFLStatus::NeedsMoreInput; + } +} + +/// Resets state and zero memory, continuing to use the same data format. +pub struct ZeroReset; + +impl ResetPolicy for ZeroReset { + #[inline] + fn reset(&self, state: &mut InflateState) { + MinReset.reset(state); + state.dict = [0; TINFL_LZ_DICT_SIZE]; + } +} + +/// Full reset of the state, including zeroing memory. +/// +/// Requires to provide new data format. +pub struct FullReset(pub DataFormat); + +impl ResetPolicy for FullReset { + #[inline] + fn reset(&self, state: &mut InflateState) { + ZeroReset.reset(state); + state.data_format = self.0; + } +} + /// A struct that compbines a decompressor with extra data for streaming decompression. /// pub struct InflateState { @@ -95,17 +141,17 @@ b } + #[inline] /// Reset the decompressor without re-allocating memory, using the given /// data format. 
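A sketch of reusing one `InflateState` across inputs with the reset policies introduced above (`MinReset` skips zeroing the dictionary buffer, so it is only appropriate for trusted input); the `reset_as` method it relies on is added in the next hunk:

```rust
use miniz_oxide::inflate::stream::{inflate, InflateState, MinReset};
use miniz_oxide::{DataFormat, MZFlush, MZStatus};

fn main() {
    let encoded = [
        120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
    ];
    let mut state = InflateState::new_boxed(DataFormat::Zlib);
    let mut out = vec![0u8; 64];

    for _ in 0..2 {
        let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish);
        let status = res.status.expect("decompression failed");
        assert_eq!(status, MZStatus::StreamEnd);
        assert_eq!(&out[..res.bytes_written as usize], &b"Hello, zlib!"[..]);
        // Cheap reset between inputs; `FullReset(DataFormat::Zlib)` would also
        // zero the dictionary and reset the data format.
        state.reset_as(MinReset);
    }
}
```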
pub fn reset(&mut self, data_format: DataFormat) { - self.decompressor().init(); - self.dict = [0; TINFL_LZ_DICT_SIZE]; - self.dict_ofs = 0; - self.dict_avail = 0; - self.first_call = true; - self.has_flushed = false; - self.data_format = data_format; - self.last_status = TINFLStatus::NeedsMoreInput; + self.reset_as(FullReset(data_format)); + } + + #[inline] + /// Resets the state according to specified policy. + pub fn reset_as(&mut self, policy: T) { + policy.reset(self) } } @@ -152,12 +198,7 @@ if (flush == MZFlush::Finish) && first_call { decomp_flags |= inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - let status = decompress( - &mut state.decomp, - next_in, - &mut Cursor::new(next_out), - decomp_flags, - ); + let status = decompress(&mut state.decomp, next_in, next_out, 0, decomp_flags); let in_bytes = status.1; let out_bytes = status.2; let status = status.0; @@ -230,11 +271,13 @@ ) -> MZResult { let orig_in_len = next_in.len(); loop { - let status = { - let mut cursor = Cursor::new(&mut state.dict[..]); - cursor.set_position(state.dict_ofs as u64); - decompress(&mut state.decomp, *next_in, &mut cursor, decomp_flags) - }; + let status = decompress( + &mut state.decomp, + *next_in, + &mut state.dict, + state.dict_ofs, + decomp_flags, + ); let in_bytes = status.1; let out_bytes = status.2; @@ -301,6 +344,8 @@ mod test { use super::{inflate, InflateState}; use crate::{DataFormat, MZFlush, MZStatus}; + use std::vec; + #[test] fn test_state() { let encoded = [ @@ -315,7 +360,17 @@ assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); assert_eq!(res.bytes_consumed, encoded.len()); - state.reset(DataFormat::Zlib); + state.reset_as(super::ZeroReset); + out.iter_mut().map(|x| *x = 0).count(); + let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish); + let status = res.status.expect("Failed to decompress!"); + assert_eq!(status, MZStatus::StreamEnd); + assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); + assert_eq!(res.bytes_consumed, encoded.len()); + + state.reset_as(super::MinReset); + out.iter_mut().map(|x| *x = 0).count(); + let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish); let status = res.status.expect("Failed to decompress!"); assert_eq!(status, MZStatus::StreamEnd); assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/lib.rs cargo-0.47.0/vendor/miniz_oxide/src/lib.rs --- cargo-0.44.1/vendor/miniz_oxide/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -21,9 +21,17 @@ //! //! 
``` +#![allow(warnings)] #![forbid(unsafe_code)] +#![cfg_attr(has_alloc, no_std)] -extern crate adler32; +#[cfg(has_alloc)] +extern crate alloc; +#[cfg(not(has_alloc))] +use std as alloc; + +#[cfg(test)] +extern crate std; pub mod deflate; pub mod inflate; @@ -145,13 +153,13 @@ } } -impl std::convert::From for MZResult { +impl core::convert::From for MZResult { fn from(res: StreamResult) -> Self { res.status } } -impl std::convert::From<&StreamResult> for MZResult { +impl core::convert::From<&StreamResult> for MZResult { fn from(res: &StreamResult) -> Self { res.status } diff -Nru cargo-0.44.1/vendor/miniz_oxide/src/shared.rs cargo-0.47.0/vendor/miniz_oxide/src/shared.rs --- cargo-0.44.1/vendor/miniz_oxide/src/shared.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miniz_oxide/src/shared.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use adler32::RollingAdler32; +use adler::Adler32; #[doc(hidden)] pub const MZ_ADLER32_INIT: u32 = 1; @@ -12,7 +12,7 @@ #[doc(hidden)] pub fn update_adler32(adler: u32, data: &[u8]) -> u32 { - let mut hash = RollingAdler32::from_value(adler); - hash.update_buffer(data); - hash.hash() + let mut hash = Adler32::from_checksum(adler); + hash.write_slice(data); + hash.checksum() } diff -Nru cargo-0.44.1/vendor/miow/.cargo-checksum.json cargo-0.47.0/vendor/miow/.cargo-checksum.json --- cargo-0.44.1/vendor/miow/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miow/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"22dfdd1d51b2639a5abd17ed07005c3af05fb7a2a3b1a1d0d7af1000a520c1c7"} \ No newline at end of file +{"files":{},"package":"07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/miow/Cargo.toml cargo-0.47.0/vendor/miow/Cargo.toml --- cargo-0.44.1/vendor/miow/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/miow/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "miow" -version = "0.3.4" +version = "0.3.5" authors = ["Alex Crichton "] description = "A zero overhead I/O library for Windows, focusing on IOCP and Async I/O\nabstractions.\n" homepage = "https://github.com/yoshuawuyts/miow" @@ -22,6 +22,9 @@ keywords = ["iocp", "windows", "io", "overlapped"] license = "MIT/Apache-2.0" repository = "https://github.com/yoshuawuyts/miow" +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" +targets = ["aarch64-pc-windows-msvc", "i686-pc-windows-msvc", "x86_64-pc-windows-msvc"] [dependencies.socket2] version = "0.3" diff -Nru cargo-0.44.1/vendor/openssl/build.rs cargo-0.47.0/vendor/openssl/build.rs --- cargo-0.44.1/vendor/openssl/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,9 @@ +#![allow(clippy::inconsistent_digit_grouping)] + use std::env; fn main() { - if let Ok(_) = env::var("DEP_OPENSSL_LIBRESSL") { + if env::var("DEP_OPENSSL_LIBRESSL").is_ok() { println!("cargo:rustc-cfg=libressl"); } @@ -10,7 +12,7 @@ } if let Ok(vars) = env::var("DEP_OPENSSL_CONF") { - for var in vars.split(",") { + for var in vars.split(',') { println!("cargo:rustc-cfg=osslconf=\"{}\"", var); } } diff -Nru cargo-0.44.1/vendor/openssl/.cargo-checksum.json cargo-0.47.0/vendor/openssl/.cargo-checksum.json --- cargo-0.44.1/vendor/openssl/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/.cargo-checksum.json 2020-10-01 
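`shared::update_adler32` above switches from the `adler32` crate's `RollingAdler32` to the `adler` crate that this update depends on. Written out as a standalone helper, the resumable update looks like this; the check at the end only confirms that chunked and one-shot hashing agree:

```rust
// Resumable Adler-32 update with the `adler` crate, matching the helper above.
use adler::Adler32;

const MZ_ADLER32_INIT: u32 = 1;

fn update_adler32(adler: u32, data: &[u8]) -> u32 {
    let mut hash = Adler32::from_checksum(adler);
    hash.write_slice(data);
    hash.checksum()
}

fn main() {
    // Feeding the data in two chunks must match a single-shot checksum.
    let whole = update_adler32(MZ_ADLER32_INIT, b"Hello, zlib!");
    let split = update_adler32(update_adler32(MZ_ADLER32_INIT, b"Hello, "), b"zlib!");
    assert_eq!(whole, split);
}
```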
21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"cee6d85f4cb4c4f59a6a85d5b68a233d280c82e29e822913b9c8b129fbf20bdd"} \ No newline at end of file +{"files":{},"package":"8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/openssl/Cargo.lock cargo-0.47.0/vendor/openssl/Cargo.lock --- cargo-0.44.1/vendor/openssl/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -14,9 +14,9 @@ [[package]] name = "cc" -version = "1.0.50" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +checksum = "b1be3409f94d7bdceeb5f5fac551039d9b3f00e25da7a74fc4d33400a0d96368" [[package]] name = "cfg-if" @@ -59,13 +59,13 @@ [[package]] name = "libc" -version = "0.2.68" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" +checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" [[package]] name = "openssl" -version = "0.10.29" +version = "0.10.30" dependencies = [ "bitflags", "cfg-if", @@ -79,18 +79,18 @@ [[package]] name = "openssl-src" -version = "111.8.1+1.1.1f" +version = "111.10.0+1.1.1g" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04f0299a91de598dde58d2e99101895498dcf3d58896a3297798f28b27c8b72" +checksum = "47cd4a96d49c3abf4cac8e8a80cba998a030c75608f158fb1c5f609772f265e6" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.55" +version = "0.9.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" dependencies = [ "autocfg", "cc", @@ -145,9 +145,9 @@ [[package]] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ "winapi", ] @@ -164,9 +164,9 @@ [[package]] name = "vcpkg" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" [[package]] name = "winapi" diff -Nru cargo-0.44.1/vendor/openssl/Cargo.toml cargo-0.47.0/vendor/openssl/Cargo.toml --- cargo-0.44.1/vendor/openssl/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "openssl" -version = "0.10.29" +version = "0.10.30" authors = ["Steven Fackler "] description = "OpenSSL bindings" readme = "README.md" @@ -36,7 +36,7 @@ version = "0.2" [dependencies.openssl-sys] -version = "0.9.55" +version = "0.9.58" [dev-dependencies.hex] version = "0.3" diff -Nru cargo-0.44.1/vendor/openssl/CHANGELOG.md cargo-0.47.0/vendor/openssl/CHANGELOG.md --- cargo-0.44.1/vendor/openssl/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -2,6 +2,29 @@ ## [Unreleased] +## [v0.10.30] - 2020-06-25 + +### Fixed + +* 
`DsaRef::private_key_to_pem` can no longer be called without a private key. + +### Changed + +* Improved the `Debug` implementations of many types. + +### Added + +* Added `is_empty` implementations for `Asn1StringRef` and `Asn1BitStringRef`. +* Added `EcPointRef::{to_pem, to_dir}` and `EcKeyRef::{public_key_from_pem, public_key_from_der}`. +* Added `Default` implementations for many types. +* Added `Debug` implementations for many types. +* Added `SslRef::set_mtu`. +* Added `Cipher::{aes_128_ocb, aes_192_ocb, aes_256_ocb}`. + +### Deprecated + +* Deprecated `SslStreamBuilder::set_dtls_mtu_size` in favor of `SslRef::set_mtu`. + ## [v0.10.29] - 2020-04-07 ### Fixed @@ -447,7 +470,8 @@ Look at the [release tags] for information about older releases. -[Unreleased]: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.29...master +[Unreleased]: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.30...master +[v0.10.30]: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.29...openssl-v0.10.30 [v0.10.29]: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.28...openssl-v0.10.29 [v0.10.28]: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.27...openssl-v0.10.28 [v0.10.27]: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.26...openssl-v0.10.27 diff -Nru cargo-0.44.1/vendor/openssl/.pc/disable-vendor.patch/Cargo.toml cargo-0.47.0/vendor/openssl/.pc/disable-vendor.patch/Cargo.toml --- cargo-0.44.1/vendor/openssl/.pc/disable-vendor.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/.pc/disable-vendor.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "openssl" -version = "0.10.29" +version = "0.10.30" authors = ["Steven Fackler "] description = "OpenSSL bindings" readme = "README.md" @@ -36,7 +36,7 @@ version = "0.2" [dependencies.openssl-sys] -version = "0.9.55" +version = "0.9.58" [dev-dependencies.hex] version = "0.3" diff -Nru cargo-0.44.1/vendor/openssl/README.md cargo-0.47.0/vendor/openssl/README.md --- cargo-0.44.1/vendor/openssl/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,6 @@ # rust-openssl [![CircleCI](https://circleci.com/gh/sfackler/rust-openssl.svg?style=shield)](https://circleci.com/gh/sfackler/rust-openssl) -[![Build status](https://ci.appveyor.com/api/projects/status/d1knobws948pyynk/branch/master?svg=true)](https://ci.appveyor.com/project/sfackler/rust-openssl/branch/master) [![crates.io](https://img.shields.io/crates/v/openssl.svg)](https://crates.io/crates/openssl) OpenSSL bindings for the Rust programming language. 
diff -Nru cargo-0.44.1/vendor/openssl/src/asn1.rs cargo-0.47.0/vendor/openssl/src/asn1.rs --- cargo-0.44.1/vendor/openssl/src/asn1.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/asn1.rs 2020-10-01 21:38:28.000000000 +0000 @@ -67,12 +67,18 @@ impl fmt::Display for Asn1GeneralizedTimeRef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { - let mem_bio = MemBio::new()?; - cvt(ffi::ASN1_GENERALIZEDTIME_print( + let mem_bio = match MemBio::new() { + Err(_) => return f.write_str("error"), + Ok(m) => m, + }; + let print_result = cvt(ffi::ASN1_GENERALIZEDTIME_print( mem_bio.as_ptr(), self.as_ptr(), - ))?; - write!(f, "{}", str::from_utf8_unchecked(mem_bio.get_buf())) + )); + match print_result { + Err(_) => f.write_str("error"), + Ok(_) => f.write_str(str::from_utf8_unchecked(mem_bio.get_buf())), + } } } } @@ -124,17 +130,14 @@ #[cfg(ossl102)] pub fn diff(&self, compare: &Self) -> Result { let mut days = 0; - let mut seconds = 0; + let mut secs = 0; let other = compare.as_ptr(); - let err = unsafe { ffi::ASN1_TIME_diff(&mut days, &mut seconds, self.as_ptr(), other) }; + let err = unsafe { ffi::ASN1_TIME_diff(&mut days, &mut secs, self.as_ptr(), other) }; match err { 0 => Err(ErrorStack::get()), - _ => Ok(TimeDiff { - days: days, - secs: seconds, - }), + _ => Ok(TimeDiff { days, secs }), } } @@ -210,13 +213,25 @@ impl fmt::Display for Asn1TimeRef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { - let mem_bio = MemBio::new()?; - cvt(ffi::ASN1_TIME_print(mem_bio.as_ptr(), self.as_ptr()))?; - write!(f, "{}", str::from_utf8_unchecked(mem_bio.get_buf())) + let mem_bio = match MemBio::new() { + Err(_) => return f.write_str("error"), + Ok(m) => m, + }; + let print_result = cvt(ffi::ASN1_TIME_print(mem_bio.as_ptr(), self.as_ptr())); + match print_result { + Err(_) => f.write_str("error"), + Ok(_) => f.write_str(str::from_utf8_unchecked(mem_bio.get_buf())), + } } } } +impl fmt::Debug for Asn1TimeRef { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&self.to_string()) + } +} + impl Asn1Time { fn new() -> Result { ffi::init(); @@ -256,6 +271,7 @@ /// This corresponds to [`ASN1_TIME_set_string`]. /// /// [`ASN1_TIME_set_string`]: https://www.openssl.org/docs/manmaster/man3/ASN1_TIME_set_string.html + #[allow(clippy::should_implement_trait)] pub fn from_str(s: &str) -> Result { unsafe { let s = CString::new(s).unwrap(); @@ -370,9 +386,9 @@ } } - /// Return the string as an array of bytes + /// Return the string as an array of bytes. /// - /// The bytes do not directly corespond to UTF-8 encoding. To interact with + /// The bytes do not directly correspond to UTF-8 encoding. To interact with /// strings in rust, it is preferable to use [`as_utf8`] /// /// [`as_utf8`]: struct.Asn1String.html#method.as_utf8 @@ -380,10 +396,24 @@ unsafe { slice::from_raw_parts(ASN1_STRING_get0_data(self.as_ptr()), self.len()) } } - /// Return the length of the Asn1String (number of bytes) + /// Returns the number of bytes in the string. pub fn len(&self) -> usize { unsafe { ffi::ASN1_STRING_length(self.as_ptr()) as usize } } + + /// Determines if the string is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl fmt::Debug for Asn1StringRef { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self.as_utf8() { + Ok(openssl_string) => openssl_string.fmt(fmt), + Err(_) => fmt.write_str("error"), + } + } } foreign_type_and_impl_send_sync! 
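The `Display` rewrites above stop propagating OpenSSL errors with `?` and instead fall back to writing the literal string "error", since a `Display` implementation that returns `Err` makes `format!` and `to_string` panic. A sketch of the same shape, where `fetch_pretty_form` is a made-up stand-in for the fallible `MemBio`/`ASN1_*_print` calls:

```rust
use std::fmt;

struct Timestamp(i64);

// Placeholder for the fallible OpenSSL pretty-printing path.
fn fetch_pretty_form(t: &Timestamp) -> Result<String, ()> {
    if t.0 >= 0 { Ok(format!("t+{}s", t.0)) } else { Err(()) }
}

impl fmt::Display for Timestamp {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match fetch_pretty_form(self) {
            Ok(s) => f.write_str(&s),
            // Previously the error was propagated with `?`; now it degrades
            // gracefully to a fixed string instead of failing the formatter.
            Err(_) => f.write_str("error"),
        }
    }
}

fn main() {
    assert_eq!(Timestamp(5).to_string(), "t+5s");
    assert_eq!(Timestamp(-1).to_string(), "error");
}
```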
{ @@ -467,14 +497,20 @@ } impl Asn1BitStringRef { - /// Returns the Asn1BitString as a slice + /// Returns the Asn1BitString as a slice. pub fn as_slice(&self) -> &[u8] { unsafe { slice::from_raw_parts(ASN1_STRING_get0_data(self.as_ptr() as *mut _), self.len()) } } - /// Length of Asn1BitString in number of bytes. + + /// Returns the number of bytes in the string. pub fn len(&self) -> usize { unsafe { ffi::ASN1_STRING_length(self.as_ptr() as *const _) as usize } } + + /// Determines if the string is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } } foreign_type_and_impl_send_sync! { @@ -518,12 +554,20 @@ self.as_ptr(), 0, ); - let s = str::from_utf8(&buf[..len as usize]).map_err(|_| fmt::Error)?; - fmt.write_str(s) + match str::from_utf8(&buf[..len as usize]) { + Err(_) => fmt.write_str("error"), + Ok(s) => fmt.write_str(s), + } } } } +impl fmt::Debug for Asn1ObjectRef { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(self.to_string().as_str()) + } +} + cfg_if! { if #[cfg(any(ossl110, libressl273))] { use ffi::ASN1_STRING_get0_data; diff -Nru cargo-0.44.1/vendor/openssl/src/base64.rs cargo-0.47.0/vendor/openssl/src/base64.rs --- cargo-0.44.1/vendor/openssl/src/base64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/base64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -63,7 +63,7 @@ out.set_len(out_len as usize); } - if src.ends_with("=") { + if src.ends_with('=') { out.pop(); if src.ends_with("==") { out.pop(); diff -Nru cargo-0.44.1/vendor/openssl/src/bn.rs cargo-0.47.0/vendor/openssl/src/bn.rs --- cargo-0.44.1/vendor/openssl/src/bn.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/bn.rs 2020-10-01 21:38:28.000000000 +0000 @@ -12,16 +12,13 @@ //! use openssl::bn::BigNum; //! use openssl::error::ErrorStack; //! -//! fn bignums() -> Result<(), ErrorStack> { +//! fn main() -> Result<(), ErrorStack> { //! let a = BigNum::new()?; // a = 0 //! let b = BigNum::from_dec_str("1234567890123456789012345")?; //! let c = &a * &b; //! assert_eq!(a, c); //! Ok(()) //! } -//! # fn main() { -//! # bignums(); -//! # } //! ``` //! //! 
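`Asn1StringRef` and `Asn1BitStringRef` above gain `is_empty` next to `len`, the pairing clippy's `len_without_is_empty` lint asks for. An illustrative wrapper (not an openssl type) showing the same pattern:

```rust
// Byte-string wrapper with the conventional `len`/`is_empty` pair.
struct ByteStringRef<'a>(&'a [u8]);

impl<'a> ByteStringRef<'a> {
    /// Returns the number of bytes in the string.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Determines if the string is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

fn main() {
    assert!(ByteStringRef(b"").is_empty());
    assert_eq!(ByteStringRef(b"DER").len(), 3);
}
```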
[`BIGNUM`]: https://wiki.openssl.org/index.php/Manual:Bn_internal(3) @@ -191,6 +188,7 @@ /// OpenSSL documentation at [`BN_div_word`] /// /// [`BN_div_word`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_div_word.html + #[allow(clippy::identity_conversion)] pub fn div_word(&mut self, w: u32) -> Result { unsafe { let r = ffi::BN_div_word(self.as_ptr(), w.into()); @@ -207,6 +205,7 @@ /// OpenSSL documentation at [`BN_mod_word`] /// /// [`BN_mod_word`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_mod_word.html + #[allow(clippy::identity_conversion)] pub fn mod_word(&self, w: u32) -> Result { unsafe { let r = ffi::BN_mod_word(self.as_ptr(), w.into()); @@ -244,6 +243,7 @@ /// OpenSSL documentation at [`BN_set_bit`] /// /// [`BN_set_bit`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_set_bit.html + #[allow(clippy::identity_conversion)] pub fn set_bit(&mut self, n: i32) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_set_bit(self.as_ptr(), n.into())).map(|_| ()) } } @@ -255,6 +255,7 @@ /// OpenSSL documentation at [`BN_clear_bit`] /// /// [`BN_clear_bit`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_clear_bit.html + #[allow(clippy::identity_conversion)] pub fn clear_bit(&mut self, n: i32) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_clear_bit(self.as_ptr(), n.into())).map(|_| ()) } } @@ -264,6 +265,7 @@ /// OpenSSL documentation at [`BN_is_bit_set`] /// /// [`BN_is_bit_set`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_is_bit_set.html + #[allow(clippy::identity_conversion)] pub fn is_bit_set(&self, n: i32) -> bool { unsafe { ffi::BN_is_bit_set(self.as_ptr(), n.into()) == 1 } } @@ -275,6 +277,7 @@ /// OpenSSL documentation at [`BN_mask_bits`] /// /// [`BN_mask_bits`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_mask_bits.html + #[allow(clippy::identity_conversion)] pub fn mask_bits(&mut self, n: i32) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_mask_bits(self.as_ptr(), n.into())).map(|_| ()) } } @@ -322,6 +325,7 @@ /// OpenSSL documentation at [`BN_lshift`] /// /// [`BN_lshift`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_lshift.html + #[allow(clippy::identity_conversion)] pub fn lshift(&mut self, a: &BigNumRef, n: i32) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_lshift(self.as_ptr(), a.as_ptr(), n.into())).map(|_| ()) } } @@ -331,6 +335,7 @@ /// OpenSSL documentation at [`BN_rshift`] /// /// [`BN_rshift`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_rshift.html + #[allow(clippy::identity_conversion)] pub fn rshift(&mut self, a: &BigNumRef, n: i32) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_rshift(self.as_ptr(), a.as_ptr(), n.into())).map(|_| ()) } } @@ -416,6 +421,7 @@ /// /// [`constants`]: index.html#constants /// [`BN_rand`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_rand.html + #[allow(clippy::identity_conversion)] pub fn rand(&mut self, bits: i32, msb: MsbOption, odd: bool) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_rand( @@ -433,6 +439,7 @@ /// OpenSSL documentation at [`BN_psuedo_rand`] /// /// [`BN_psuedo_rand`]: https://www.openssl.org/docs/man1.1.0/crypto/BN_pseudo_rand.html + #[allow(clippy::identity_conversion)] pub fn pseudo_rand(&mut self, bits: i32, msb: MsbOption, odd: bool) -> Result<(), ErrorStack> { unsafe { cvt(ffi::BN_pseudo_rand( @@ -811,7 +818,7 @@ /// # Return Value /// /// Returns `true` if `self` is prime with an error probability of less than `0.25 ^ checks`. 
- + #[allow(clippy::identity_conversion)] pub fn is_prime(&self, checks: i32, ctx: &mut BigNumContextRef) -> Result { unsafe { cvt_n(ffi::BN_is_prime_ex( @@ -837,6 +844,7 @@ /// # Return Value /// /// Returns `true` if `self` is prime with an error probability of less than `0.25 ^ checks`. + #[allow(clippy::identity_conversion)] pub fn is_prime_fasttest( &self, checks: i32, @@ -1372,16 +1380,16 @@ #[test] fn test_to_from_slice() { - let v0 = BigNum::from_u32(10203004).unwrap(); + let v0 = BigNum::from_u32(10_203_004).unwrap(); let vec = v0.to_vec(); let v1 = BigNum::from_slice(&vec).unwrap(); - assert!(v0 == v1); + assert_eq!(v0, v1); } #[test] fn test_negation() { - let a = BigNum::from_u32(909829283).unwrap(); + let a = BigNum::from_u32(909_829_283).unwrap(); assert!(!a.is_negative()); assert!((-a).is_negative()); @@ -1389,15 +1397,14 @@ #[test] fn test_shift() { - let a = BigNum::from_u32(909829283).unwrap(); - use std::ops::{Shl, Shr}; + let a = BigNum::from_u32(909_829_283).unwrap(); - assert!(a == a.shl(1).shr(1)); + assert_eq!(a, &(&a << 1) >> 1); } #[test] fn test_rand_range() { - let range = BigNum::from_u32(909829283).unwrap(); + let range = BigNum::from_u32(909_829_283).unwrap(); let mut result = BigNum::from_dec_str(&range.to_dec_str().unwrap()).unwrap(); range.rand_range(&mut result).unwrap(); assert!(result >= BigNum::from_u32(0).unwrap() && result < range); @@ -1405,7 +1412,7 @@ #[test] fn test_pseudo_rand_range() { - let range = BigNum::from_u32(909829283).unwrap(); + let range = BigNum::from_u32(909_829_283).unwrap(); let mut result = BigNum::from_dec_str(&range.to_dec_str().unwrap()).unwrap(); range.pseudo_rand_range(&mut result).unwrap(); assert!(result >= BigNum::from_u32(0).unwrap() && result < range); @@ -1413,7 +1420,7 @@ #[test] fn test_prime_numbers() { - let a = BigNum::from_u32(19029017).unwrap(); + let a = BigNum::from_u32(19_029_017).unwrap(); let mut p = BigNum::new().unwrap(); p.generate_prime(128, true, None, Some(&a)).unwrap(); diff -Nru cargo-0.44.1/vendor/openssl/src/cms.rs cargo-0.47.0/vendor/openssl/src/cms.rs --- cargo-0.44.1/vendor/openssl/src/cms.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/cms.rs 2020-10-01 21:38:28.000000000 +0000 @@ -81,7 +81,6 @@ let pkey = pkey.as_ptr(); let cert = cert.as_ptr(); let out = MemBio::new()?; - let flags: u32 = 0; cvt(ffi::CMS_decrypt( self.as_ptr(), @@ -89,7 +88,7 @@ cert, ptr::null_mut(), out.as_ptr(), - flags.into(), + 0, ))?; Ok(out.get_buf().to_owned()) diff -Nru cargo-0.44.1/vendor/openssl/src/conf.rs cargo-0.47.0/vendor/openssl/src/conf.rs --- cargo-0.44.1/vendor/openssl/src/conf.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/conf.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,6 +18,10 @@ } /// Construct from raw pointer. + /// + /// # Safety + /// + /// The caller must ensure that the pointer is valid. pub unsafe fn from_ptr(ptr: *mut ffi::CONF_METHOD) -> ConfMethod { ConfMethod(ptr) } diff -Nru cargo-0.44.1/vendor/openssl/src/derive.rs cargo-0.47.0/vendor/openssl/src/derive.rs --- cargo-0.44.1/vendor/openssl/src/derive.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/derive.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,6 +14,7 @@ unsafe impl<'a> Sync for Deriver<'a> {} unsafe impl<'a> Send for Deriver<'a> {} +#[allow(clippy::len_without_is_empty)] impl<'a> Deriver<'a> { /// Creates a new `Deriver` using the provided private key. 
/// diff -Nru cargo-0.44.1/vendor/openssl/src/dsa.rs cargo-0.47.0/vendor/openssl/src/dsa.rs --- cargo-0.44.1/vendor/openssl/src/dsa.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/dsa.rs 2020-10-01 21:38:28.000000000 +0000 @@ -80,26 +80,6 @@ where T: HasPublic, { - private_key_to_pem! { - /// Serializes the private key to a PEM-encoded DSAPrivateKey structure. - /// - /// The output will have a header of `-----BEGIN DSA PRIVATE KEY-----`. - /// - /// This corresponds to [`PEM_write_bio_DSAPrivateKey`]. - /// - /// [`PEM_write_bio_DSAPrivateKey`]: https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_DSAPrivateKey.html - private_key_to_pem, - /// Serializes the private key to a PEM-encoded encrypted DSAPrivateKey structure. - /// - /// The output will have a header of `-----BEGIN DSA PRIVATE KEY-----`. - /// - /// This corresponds to [`PEM_write_bio_DSAPrivateKey`]. - /// - /// [`PEM_write_bio_DSAPrivateKey`]: https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_DSAPrivateKey.html - private_key_to_pem_passphrase, - ffi::PEM_write_bio_DSAPrivateKey - } - to_pem! { /// Serialies the public key into a PEM-encoded SubjectPublicKeyInfo structure. /// @@ -136,6 +116,26 @@ where T: HasPrivate, { + private_key_to_pem! { + /// Serializes the private key to a PEM-encoded DSAPrivateKey structure. + /// + /// The output will have a header of `-----BEGIN DSA PRIVATE KEY-----`. + /// + /// This corresponds to [`PEM_write_bio_DSAPrivateKey`]. + /// + /// [`PEM_write_bio_DSAPrivateKey`]: https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_DSAPrivateKey.html + private_key_to_pem, + /// Serializes the private key to a PEM-encoded encrypted DSAPrivateKey structure. + /// + /// The output will have a header of `-----BEGIN DSA PRIVATE KEY-----`. + /// + /// This corresponds to [`PEM_write_bio_DSAPrivateKey`]. + /// + /// [`PEM_write_bio_DSAPrivateKey`]: https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_DSAPrivateKey.html + private_key_to_pem_passphrase, + ffi::PEM_write_bio_DSAPrivateKey + } + /// Returns a reference to the private key component of `self`. pub fn priv_key(&self) -> &BigNumRef { unsafe { @@ -451,6 +451,7 @@ } #[test] + #[allow(clippy::redundant_clone)] fn clone() { let key = Dsa::generate(2048).unwrap(); drop(key.clone()); diff -Nru cargo-0.44.1/vendor/openssl/src/ecdsa.rs cargo-0.47.0/vendor/openssl/src/ecdsa.rs --- cargo-0.44.1/vendor/openssl/src/ecdsa.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ecdsa.rs 2020-10-01 21:38:28.000000000 +0000 @@ -197,11 +197,11 @@ let verification2 = res .verify(String::from("hello2").as_bytes(), &public_key) .unwrap(); - assert!(verification2 == false); + assert!(!verification2); // Signature will not be verified using the correct data but the incorrect public key let verification3 = res.verify(data.as_bytes(), &public_key2).unwrap(); - assert!(verification3 == false); + assert!(!verification3); } #[test] diff -Nru cargo-0.44.1/vendor/openssl/src/ec.rs cargo-0.47.0/vendor/openssl/src/ec.rs --- cargo-0.44.1/vendor/openssl/src/ec.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ec.rs 2020-10-01 21:38:28.000000000 +0000 @@ -15,22 +15,6 @@ //! [`EcGroup`]: struct.EcGroup.html //! [`Nid`]: ../nid/struct.Nid.html //! [Eliptic Curve Cryptography]: https://wiki.openssl.org/index.php/Elliptic_Curve_Cryptography -//! -//! # Examples -//! -//! ``` -//! use openssl::ec::{EcGroup, EcPoint}; -//! use openssl::nid::Nid; -//! use openssl::error::ErrorStack; -//! 
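The `dsa.rs` change above moves `private_key_to_pem`/`private_key_to_pem_passphrase` from the `HasPublic` impl block to the `HasPrivate` one, which is what the changelog entry about `DsaRef::private_key_to_pem` refers to. A sketch of the marker-trait pattern with illustrative types (not openssl's):

```rust
// Private-key serialization is only offered when the type parameter proves a
// private key is present.
trait HasPublic {}
trait HasPrivate: HasPublic {}

struct Public;
struct Private;
impl HasPublic for Public {}
impl HasPublic for Private {}
impl HasPrivate for Private {}

struct DsaKey<T> {
    _marker: std::marker::PhantomData<T>,
}

impl<T: HasPublic> DsaKey<T> {
    fn public_key_to_pem(&self) -> Vec<u8> {
        b"-----BEGIN PUBLIC KEY-----".to_vec()
    }
}

impl<T: HasPrivate> DsaKey<T> {
    // Only callable when T proves a private key is present.
    fn private_key_to_pem(&self) -> Vec<u8> {
        b"-----BEGIN DSA PRIVATE KEY-----".to_vec()
    }
}

fn main() {
    let public = DsaKey::<Public> { _marker: std::marker::PhantomData };
    let private = DsaKey::<Private> { _marker: std::marker::PhantomData };
    let _ = public.public_key_to_pem();
    let _ = private.private_key_to_pem();
    // `public.private_key_to_pem()` would now fail to compile.
}
```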
fn get_ec_point() -> Result { -//! let group = EcGroup::from_curve_name(Nid::SECP224R1)?; -//! let point = EcPoint::new(&group)?; -//! Ok(point) -//! } -//! # fn main() { -//! # let _ = get_ec_point(); -//! # } -//! ``` use ffi; use foreign_types::{ForeignType, ForeignTypeRef}; use libc::c_int; @@ -347,6 +331,7 @@ group: &EcGroupRef, q: &EcPointRef, m: &BigNumRef, + // FIXME should be &mut ctx: &BigNumContextRef, ) -> Result<(), ErrorStack> { unsafe { @@ -367,6 +352,7 @@ &mut self, group: &EcGroupRef, n: &BigNumRef, + // FIXME should be &mut ctx: &BigNumContextRef, ) -> Result<(), ErrorStack> { unsafe { @@ -656,6 +642,28 @@ EcPointRef::from_ptr(ptr as *mut _) } } + + to_pem! { + /// Serialies the public key into a PEM-encoded SubjectPublicKeyInfo structure. + /// + /// The output will have a header of `-----BEGIN PUBLIC KEY-----`. + /// + /// This corresponds to [`PEM_write_bio_EC_PUBKEY`]. + /// + /// [`PEM_write_bio_EC_PUBKEY`]: https://www.openssl.org/docs/man1.1.0/crypto/PEM_write_bio_EC_PUBKEY.html + public_key_to_pem, + ffi::PEM_write_bio_EC_PUBKEY + } + + to_der! { + /// Serializes the public key into a DER-encoded SubjectPublicKeyInfo structure. + /// + /// This corresponds to [`i2d_EC_PUBKEY`]. + /// + /// [`i2d_EC_PUBKEY`]: https://www.openssl.org/docs/man1.1.0/crypto/i2d_EC_PUBKEY.html + public_key_to_der, + ffi::i2d_EC_PUBKEY + } } impl EcKeyRef @@ -792,6 +800,30 @@ }) } } + + from_pem! { + /// Decodes a PEM-encoded SubjectPublicKeyInfo structure containing a EC key. + /// + /// The input should have a header of `-----BEGIN PUBLIC KEY-----`. + /// + /// This corresponds to [`PEM_read_bio_EC_PUBKEY`]. + /// + /// [`PEM_read_bio_EC_PUBKEY`]: https://www.openssl.org/docs/man1.1.0/crypto/PEM_read_bio_EC_PUBKEY.html + public_key_from_pem, + EcKey, + ffi::PEM_read_bio_EC_PUBKEY + } + + from_der! { + /// Decodes a DER-encoded SubjectPublicKeyInfo structure containing a EC key. + /// + /// This corresponds to [`d2i_EC_PUBKEY`]. 
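A usage sketch for the EC public-key serializers added above (`public_key_to_pem`/`public_key_from_pem`), assuming the `openssl` crate at the version this diff vendors; the final assertion expects a named-curve P-256 key to re-encode to the same PEM bytes:

```rust
use openssl::ec::{EcGroup, EcKey};
use openssl::error::ErrorStack;
use openssl::nid::Nid;

fn main() -> Result<(), ErrorStack> {
    let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?;
    let key = EcKey::generate(&group)?;

    // SubjectPublicKeyInfo PEM, header `-----BEGIN PUBLIC KEY-----`.
    let pem = key.public_key_to_pem()?;

    // Parse it back as a public-only key with the new constructor.
    let public_only = EcKey::public_key_from_pem(&pem)?;
    assert_eq!(public_only.public_key_to_pem()?, pem);
    Ok(())
}
```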
+ /// + /// [`d2i_EC_PUBKEY`]: https://www.openssl.org/docs/man1.1.0/crypto/d2i_EC_PUBKEY.html + public_key_from_der, + EcKey, + ffi::d2i_EC_PUBKEY + } } impl EcKey { @@ -917,6 +949,7 @@ } #[test] + #[allow(clippy::redundant_clone)] fn dup() { let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); let key = EcKey::generate(&group).unwrap(); @@ -959,7 +992,7 @@ let mut ctx = BigNumContext::new().unwrap(); let mut public_key = EcPoint::new(&group).unwrap(); public_key - .mul_generator(&group, key.private_key(), &mut ctx) + .mul_generator(&group, key.private_key(), &ctx) .unwrap(); assert!(public_key.eq(&group, key.public_key(), &mut ctx).unwrap()); } @@ -971,7 +1004,7 @@ let one = BigNum::from_u32(1).unwrap(); let mut ctx = BigNumContext::new().unwrap(); let mut ecp = EcPoint::new(&group).unwrap(); - ecp.mul_generator(&group, &one, &mut ctx).unwrap(); + ecp.mul_generator(&group, &one, &ctx).unwrap(); assert!(ecp.eq(&group, gen, &mut ctx).unwrap()); } @@ -998,9 +1031,8 @@ let dup_key = EcKey::from_private_components(&group, key.private_key(), key.public_key()).unwrap(); - let res = dup_key.check_key().unwrap(); + dup_key.check_key().unwrap(); - assert!(res == ()); assert!(key.private_key() == dup_key.private_key()); } diff -Nru cargo-0.44.1/vendor/openssl/src/envelope.rs cargo-0.47.0/vendor/openssl/src/envelope.rs --- cargo-0.44.1/vendor/openssl/src/envelope.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/envelope.rs 2020-10-01 21:38:28.000000000 +0000 @@ -92,6 +92,7 @@ } /// Returns the initialization vector, if the cipher uses one. + #[allow(clippy::option_as_ref_deref)] pub fn iv(&self) -> Option<&[u8]> { self.iv.as_ref().map(|v| &**v) } diff -Nru cargo-0.44.1/vendor/openssl/src/error.rs cargo-0.47.0/vendor/openssl/src/error.rs --- cargo-0.44.1/vendor/openssl/src/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -161,7 +161,7 @@ None } else { ptr::copy_nonoverlapping(data.as_ptr(), ptr as *mut u8, data.len()); - *ptr.offset(data.len() as isize) = 0; + *ptr.add(data.len()) = 0; Some((ptr, ffi::ERR_TXT_MALLOCED)) } } @@ -229,6 +229,7 @@ } /// Returns additional data describing the error. + #[allow(clippy::option_as_ref_deref)] pub fn data(&self) -> Option<&str> { self.data.as_ref().map(|s| &**s) } diff -Nru cargo-0.44.1/vendor/openssl/src/ex_data.rs cargo-0.47.0/vendor/openssl/src/ex_data.rs --- cargo-0.44.1/vendor/openssl/src/ex_data.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ex_data.rs 2020-10-01 21:38:28.000000000 +0000 @@ -16,10 +16,16 @@ } impl Index { + /// Creates an `Index` from a raw integer index. + /// + /// # Safety + /// + /// The caller must ensure that the index correctly maps to a `U` value stored in a `T`. pub unsafe fn from_raw(idx: c_int) -> Index { Index(idx, PhantomData) } + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } diff -Nru cargo-0.44.1/vendor/openssl/src/hash.rs cargo-0.47.0/vendor/openssl/src/hash.rs --- cargo-0.44.1/vendor/openssl/src/hash.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/hash.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,6 +3,7 @@ use std::io; use std::io::prelude::*; use std::ops::{Deref, DerefMut}; +use std::ptr; use error::ErrorStack; use nid::Nid; @@ -20,6 +21,11 @@ pub struct MessageDigest(*const ffi::EVP_MD); impl MessageDigest { + /// Creates a `MessageDigest` from a raw OpenSSL pointer. 
+ /// + /// # Safety + /// + /// The caller must ensure the pointer is valid. pub unsafe fn from_ptr(x: *const ffi::EVP_MD) -> Self { MessageDigest(x) } @@ -102,16 +108,19 @@ unsafe { MessageDigest(ffi::EVP_ripemd160()) } } + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_ptr(&self) -> *const ffi::EVP_MD { self.0 } - /// The size of the digest in bytes + /// The size of the digest in bytes. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn size(&self) -> usize { unsafe { ffi::EVP_MD_size(self.0) as usize } } - /// The name of the digest + /// The name of the digest. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn type_(&self) -> Nid { Nid::from_raw(unsafe { ffi::EVP_MD_type(self.0) }) } @@ -199,7 +208,7 @@ let ctx = unsafe { cvt_p(EVP_MD_CTX_new())? }; let mut h = Hasher { - ctx: ctx, + ctx, md: ty.as_ptr(), type_: ty, state: Finalized, @@ -217,7 +226,7 @@ Finalized => (), } unsafe { - cvt(ffi::EVP_DigestInit_ex(self.ctx, self.md, 0 as *mut _))?; + cvt(ffi::EVP_DigestInit_ex(self.ctx, self.md, ptr::null_mut()))?; } self.state = Reset; Ok(()) @@ -254,7 +263,7 @@ ))?; self.state = Finalized; Ok(DigestBytes { - buf: buf, + buf, len: len as usize, }) } @@ -301,7 +310,7 @@ ctx }; Hasher { - ctx: ctx, + ctx, md: self.md, type_: self.type_, state: self.state, @@ -407,14 +416,13 @@ } fn hash_recycle_test(h: &mut Hasher, hashtest: &(&str, &str)) { - let _ = h.write_all(&Vec::from_hex(hashtest.0).unwrap()).unwrap(); + h.write_all(&Vec::from_hex(hashtest.0).unwrap()).unwrap(); let res = h.finish().unwrap(); assert_eq!(hex::encode(res), hashtest.1); } // Test vectors from http://www.nsrl.nist.gov/testdata/ - #[allow(non_upper_case_globals)] - const md5_tests: [(&'static str, &'static str); 13] = [ + const MD5_TESTS: [(&str, &str); 13] = [ ("", "d41d8cd98f00b204e9800998ecf8427e"), ("7F", "83acb6e67e50e31db6ed341dd2de1595"), ("EC9C", "0b07f0d4ca797d8ac58874f887cb0b68"), @@ -435,7 +443,7 @@ #[test] fn test_md5() { - for test in md5_tests.iter() { + for test in MD5_TESTS.iter() { hash_test(MessageDigest::md5(), test); } } @@ -443,7 +451,7 @@ #[test] fn test_md5_recycle() { let mut h = Hasher::new(MessageDigest::md5()).unwrap(); - for test in md5_tests.iter() { + for test in MD5_TESTS.iter() { hash_recycle_test(&mut h, test); } } @@ -451,7 +459,7 @@ #[test] fn test_finish_twice() { let mut h = Hasher::new(MessageDigest::md5()).unwrap(); - h.write_all(&Vec::from_hex(md5_tests[6].0).unwrap()) + h.write_all(&Vec::from_hex(MD5_TESTS[6].0).unwrap()) .unwrap(); h.finish().unwrap(); let res = h.finish().unwrap(); @@ -460,9 +468,10 @@ } #[test] + #[allow(clippy::redundant_clone)] fn test_clone() { let i = 7; - let inp = Vec::from_hex(md5_tests[i].0).unwrap(); + let inp = Vec::from_hex(MD5_TESTS[i].0).unwrap(); assert!(inp.len() > 2); let p = inp.len() / 2; let h0 = Hasher::new(MessageDigest::md5()).unwrap(); @@ -475,18 +484,18 @@ let mut h2 = h1.clone(); h2.write_all(&inp[p..]).unwrap(); let res = h2.finish().unwrap(); - assert_eq!(hex::encode(res), md5_tests[i].1); + assert_eq!(hex::encode(res), MD5_TESTS[i].1); } h1.write_all(&inp[p..]).unwrap(); let res = h1.finish().unwrap(); - assert_eq!(hex::encode(res), md5_tests[i].1); + assert_eq!(hex::encode(res), MD5_TESTS[i].1); println!("Clone a finished hasher"); let mut h3 = h1.clone(); - h3.write_all(&Vec::from_hex(md5_tests[i + 1].0).unwrap()) + h3.write_all(&Vec::from_hex(MD5_TESTS[i + 1].0).unwrap()) .unwrap(); let res = h3.finish().unwrap(); - assert_eq!(hex::encode(res), md5_tests[i + 1].1); + assert_eq!(hex::encode(res), MD5_TESTS[i + 1].1); 
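For context on the recycle tests touched above: `Hasher::finish` resets the hasher, so one instance can be reused across messages. A small sketch of that pattern (helper name and inputs are illustrative):

    use openssl::error::ErrorStack;
    use openssl::hash::{Hasher, MessageDigest};

    fn two_digests() -> Result<(), ErrorStack> {
        let mut h = Hasher::new(MessageDigest::sha256())?;
        h.update(b"first message")?;
        let first = h.finish()?;

        // finish() leaves the hasher reset, which is what hash_recycle_test relies on.
        h.update(b"second message")?;
        let second = h.finish()?;

        assert_ne!(&*first, &*second);
        Ok(())
    }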
} #[test] diff -Nru cargo-0.44.1/vendor/openssl/src/nid.rs cargo-0.47.0/vendor/openssl/src/nid.rs --- cargo-0.44.1/vendor/openssl/src/nid.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/nid.rs 2020-10-01 21:38:28.000000000 +0000 @@ -55,6 +55,7 @@ } /// Return the integer representation of a `Nid`. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } @@ -62,6 +63,7 @@ /// Returns the `Nid`s of the digest and public key algorithms associated with a signature ID. /// /// This corresponds to `OBJ_find_sigid_algs`. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn signature_algorithms(&self) -> Option { unsafe { let mut digest = 0; @@ -81,6 +83,7 @@ /// This corresponds to [`OBJ_nid2ln`] /// /// [`OBJ_nid2ln`]: https://www.openssl.org/docs/man1.1.0/crypto/OBJ_nid2ln.html + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn long_name(&self) -> Result<&'static str, ErrorStack> { unsafe { cvt_p(ffi::OBJ_nid2ln(self.0) as *mut c_char) @@ -92,6 +95,7 @@ /// This corresponds to [`OBJ_nid2sn`] /// /// [`OBJ_nid2sn`]: https://www.openssl.org/docs/man1.1.0/crypto/OBJ_nid2sn.html + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn short_name(&self) -> Result<&'static str, ErrorStack> { unsafe { cvt_p(ffi::OBJ_nid2sn(self.0) as *mut c_char) diff -Nru cargo-0.44.1/vendor/openssl/src/ocsp.rs cargo-0.47.0/vendor/openssl/src/ocsp.rs --- cargo-0.44.1/vendor/openssl/src/ocsp.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ocsp.rs 2020-10-01 21:38:28.000000000 +0000 @@ -32,14 +32,6 @@ pub struct OcspResponseStatus(c_int); impl OcspResponseStatus { - pub fn from_raw(raw: c_int) -> OcspResponseStatus { - OcspResponseStatus(raw) - } - - pub fn as_raw(&self) -> c_int { - self.0 - } - pub const SUCCESSFUL: OcspResponseStatus = OcspResponseStatus(ffi::OCSP_RESPONSE_STATUS_SUCCESSFUL); pub const MALFORMED_REQUEST: OcspResponseStatus = @@ -52,37 +44,39 @@ OcspResponseStatus(ffi::OCSP_RESPONSE_STATUS_SIGREQUIRED); pub const UNAUTHORIZED: OcspResponseStatus = OcspResponseStatus(ffi::OCSP_RESPONSE_STATUS_UNAUTHORIZED); + + pub fn from_raw(raw: c_int) -> OcspResponseStatus { + OcspResponseStatus(raw) + } + + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn as_raw(&self) -> c_int { + self.0 + } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct OcspCertStatus(c_int); impl OcspCertStatus { + pub const GOOD: OcspCertStatus = OcspCertStatus(ffi::V_OCSP_CERTSTATUS_GOOD); + pub const REVOKED: OcspCertStatus = OcspCertStatus(ffi::V_OCSP_CERTSTATUS_REVOKED); + pub const UNKNOWN: OcspCertStatus = OcspCertStatus(ffi::V_OCSP_CERTSTATUS_UNKNOWN); + pub fn from_raw(raw: c_int) -> OcspCertStatus { OcspCertStatus(raw) } + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } - - pub const GOOD: OcspCertStatus = OcspCertStatus(ffi::V_OCSP_CERTSTATUS_GOOD); - pub const REVOKED: OcspCertStatus = OcspCertStatus(ffi::V_OCSP_CERTSTATUS_REVOKED); - pub const UNKNOWN: OcspCertStatus = OcspCertStatus(ffi::V_OCSP_CERTSTATUS_UNKNOWN); } #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct OcspRevokedStatus(c_int); impl OcspRevokedStatus { - pub fn from_raw(raw: c_int) -> OcspRevokedStatus { - OcspRevokedStatus(raw) - } - - pub fn as_raw(&self) -> c_int { - self.0 - } - pub const NO_STATUS: OcspRevokedStatus = OcspRevokedStatus(ffi::OCSP_REVOKED_STATUS_NOSTATUS); pub const UNSPECIFIED: OcspRevokedStatus = OcspRevokedStatus(ffi::OCSP_REVOKED_STATUS_UNSPECIFIED); @@ -100,6 +94,15 @@ 
OcspRevokedStatus(ffi::OCSP_REVOKED_STATUS_CERTIFICATEHOLD); pub const REMOVE_FROM_CRL: OcspRevokedStatus = OcspRevokedStatus(ffi::OCSP_REVOKED_STATUS_REMOVEFROMCRL); + + pub fn from_raw(raw: c_int) -> OcspRevokedStatus { + OcspRevokedStatus(raw) + } + + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn as_raw(&self) -> c_int { + self.0 + } } pub struct OcspStatus<'a> { @@ -190,10 +193,11 @@ } else { Some(Asn1GeneralizedTimeRef::from_ptr(revocation_time)) }; + Some(OcspStatus { status: OcspCertStatus(status), reason: OcspRevokedStatus(status), - revocation_time: revocation_time, + revocation_time, this_update: Asn1GeneralizedTimeRef::from_ptr(this_update), next_update: Asn1GeneralizedTimeRef::from_ptr(next_update), }) diff -Nru cargo-0.44.1/vendor/openssl/src/pkcs12.rs cargo-0.47.0/vendor/openssl/src/pkcs12.rs --- cargo-0.44.1/vendor/openssl/src/pkcs12.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/pkcs12.rs 2020-10-01 21:38:28.000000000 +0000 @@ -58,11 +58,7 @@ Some(Stack::from_ptr(chain)) }; - Ok(ParsedPkcs12 { - pkey: pkey, - cert: cert, - chain: chain, - }) + Ok(ParsedPkcs12 { pkey, cert, chain }) } } } diff -Nru cargo-0.44.1/vendor/openssl/src/pkcs5.rs cargo-0.47.0/vendor/openssl/src/pkcs5.rs --- cargo-0.44.1/vendor/openssl/src/pkcs5.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/pkcs5.rs 2020-10-01 21:38:28.000000000 +0000 @@ -23,6 +23,7 @@ /// /// New applications should not use this and instead use /// `pbkdf2_hmac` or another more modern key derivation algorithm. +#[allow(clippy::identity_conversion)] pub fn bytes_to_key( cipher: Cipher, digest: MessageDigest, @@ -75,7 +76,7 @@ iv_ptr, ))?; - Ok(KeyIvPair { key: key, iv: iv }) + Ok(KeyIvPair { key, iv }) } } diff -Nru cargo-0.44.1/vendor/openssl/src/pkcs7.rs cargo-0.47.0/vendor/openssl/src/pkcs7.rs --- cargo-0.44.1/vendor/openssl/src/pkcs7.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/pkcs7.rs 2020-10-01 21:38:28.000000000 +0000 @@ -303,7 +303,7 @@ let cert = include_bytes!("../test/cert.pem"); let cert = X509::from_pem(cert).unwrap(); let certs = Stack::new().unwrap(); - let message: String = String::from("foo"); + let message = "foo"; let flags = Pkcs7Flags::STREAM | Pkcs7Flags::DETACHED; let pkey = include_bytes!("../test/key.pem"); let pkey = PKey::private_key_from_pem(pkey).unwrap(); @@ -336,11 +336,8 @@ ) .expect("should succeed"); - assert_eq!(message.clone().into_bytes(), output); - assert_eq!( - message.clone().into_bytes(), - content.expect("should be non-empty") - ); + assert_eq!(output, message.as_bytes()); + assert_eq!(content.expect("should be non-empty"), message.as_bytes()); } #[test] @@ -348,7 +345,7 @@ let cert = include_bytes!("../test/cert.pem"); let cert = X509::from_pem(cert).unwrap(); let certs = Stack::new().unwrap(); - let message: String = String::from("foo"); + let message = "foo"; let flags = Pkcs7Flags::STREAM; let pkey = include_bytes!("../test/key.pem"); let pkey = PKey::private_key_from_pem(pkey).unwrap(); @@ -375,7 +372,7 @@ .verify(&certs, &store, None, Some(&mut output), flags) .expect("should succeed"); - assert_eq!(message.clone().into_bytes(), output); + assert_eq!(output, message.as_bytes()); assert!(content.is_none()); } diff -Nru cargo-0.44.1/vendor/openssl/src/pkey.rs cargo-0.47.0/vendor/openssl/src/pkey.rs --- cargo-0.44.1/vendor/openssl/src/pkey.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/pkey.rs 2020-10-01 21:38:28.000000000 +0000 @@ -49,6 +49,7 @@ use 
foreign_types::{ForeignType, ForeignTypeRef}; use libc::{c_int, c_long}; use std::ffi::CString; +use std::fmt; use std::mem; use std::ptr; @@ -58,6 +59,8 @@ use ec::EcKey; use error::ErrorStack; use rsa::Rsa; +#[cfg(ossl110)] +use symm::Cipher; use util::{invoke_passwd_cb, CallbackState}; use {cvt, cvt_p}; @@ -75,16 +78,6 @@ pub struct Id(c_int); impl Id { - /// Creates a `Id` from an integer representation. - pub fn from_raw(value: c_int) -> Id { - Id(value) - } - - /// Returns the integer representation of the `Id`. - pub fn as_raw(&self) -> c_int { - self.0 - } - pub const RSA: Id = Id(ffi::EVP_PKEY_RSA); pub const HMAC: Id = Id(ffi::EVP_PKEY_HMAC); pub const DSA: Id = Id(ffi::EVP_PKEY_DSA); @@ -95,6 +88,17 @@ pub const ED25519: Id = Id(ffi::EVP_PKEY_ED25519); #[cfg(ossl111)] pub const ED448: Id = Id(ffi::EVP_PKEY_ED448); + + /// Creates a `Id` from an integer representation. + pub fn from_raw(value: c_int) -> Id { + Id(value) + } + + /// Returns the integer representation of the `Id`. + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn as_raw(&self) -> c_int { + self.0 + } } /// A trait indicating that a key has parameters. @@ -283,6 +287,25 @@ } } +impl fmt::Debug for PKey { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let alg = match self.id() { + Id::RSA => "RSA", + Id::HMAC => "HMAC", + Id::DSA => "DSA", + Id::DH => "DH", + Id::EC => "EC", + #[cfg(ossl111)] + Id::ED25519 => "Ed25519", + #[cfg(ossl111)] + Id::ED448 => "Ed448", + _ => "unknown", + }; + fmt.debug_struct("PKey").field("algorithm", &alg).finish() + // TODO: Print details for each specific type of key + } +} + impl Clone for PKey { fn clone(&self) -> PKey { PKeyRef::to_owned(self) @@ -394,7 +417,8 @@ /// /// To compute CMAC values, use the `sign` module. #[cfg(ossl110)] - pub fn cmac(cipher: &::symm::Cipher, key: &[u8]) -> Result, ErrorStack> { + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn cmac(cipher: &Cipher, key: &[u8]) -> Result, ErrorStack> { unsafe { assert!(key.len() <= c_int::max_value() as usize); let kctx = cvt_p(ffi::EVP_PKEY_CTX_new_id( @@ -527,10 +551,7 @@ /// Deserializes a DER-formatted PKCS#8 unencrypted private key. /// /// This method is mainly for interoperability reasons. Encrypted keyfiles should be preferred. - pub fn private_key_from_pkcs8( - der: &[u8], - ) -> Result, ErrorStack> - { + pub fn private_key_from_pkcs8(der: &[u8]) -> Result, ErrorStack> { unsafe { ffi::init(); let len = der.len().min(c_long::max_value() as usize) as c_long; @@ -539,8 +560,7 @@ &mut der.as_ptr(), len, ))?; - let res = cvt_p(ffi::EVP_PKCS82PKEY(p8inf)) - .map(|p| PKey::from_ptr(p)); + let res = cvt_p(ffi::EVP_PKCS82PKEY(p8inf)).map(|p| PKey::from_ptr(p)); ffi::PKCS8_PRIV_KEY_INFO_free(p8inf); res } diff -Nru cargo-0.44.1/vendor/openssl/src/rsa.rs cargo-0.47.0/vendor/openssl/src/rsa.rs --- cargo-0.44.1/vendor/openssl/src/rsa.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/rsa.rs 2020-10-01 21:38:28.000000000 +0000 @@ -49,20 +49,21 @@ pub struct Padding(c_int); impl Padding { + pub const NONE: Padding = Padding(ffi::RSA_NO_PADDING); + pub const PKCS1: Padding = Padding(ffi::RSA_PKCS1_PADDING); + pub const PKCS1_OAEP: Padding = Padding(ffi::RSA_PKCS1_OAEP_PADDING); + pub const PKCS1_PSS: Padding = Padding(ffi::RSA_PKCS1_PSS_PADDING); + /// Creates a `Padding` from an integer representation. pub fn from_raw(value: c_int) -> Padding { Padding(value) } /// Returns the integer representation of `Padding`. 
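The `Debug` impl added above for `PKey` reports only the key's algorithm for now (per-algorithm detail is left as an upstream TODO). A quick sketch of what it enables; the printed shape is indicative, not guaranteed:

    use openssl::error::ErrorStack;
    use openssl::pkey::PKey;

    fn debug_print_key() -> Result<(), ErrorStack> {
        let key = PKey::hmac(b"top secret")?;
        // Prints roughly: PKey { algorithm: "HMAC" }
        println!("{:?}", key);
        Ok(())
    }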
+ #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } - - pub const NONE: Padding = Padding(ffi::RSA_NO_PADDING); - pub const PKCS1: Padding = Padding(ffi::RSA_PKCS1_PADDING); - pub const PKCS1_OAEP: Padding = Padding(ffi::RSA_PKCS1_OAEP_PADDING); - pub const PKCS1_PSS: Padding = Padding(ffi::RSA_PKCS1_PSS_PADDING); } generic_foreign_type_and_impl_send_sync! { @@ -579,6 +580,7 @@ /// /// This a convenience method over /// `Rsa::build(n, e, d)?.set_factors(p, q)?.set_crt_params(dmp1, dmq1, iqmp)?.build()` + #[allow(clippy::too_many_arguments, clippy::many_single_char_names)] pub fn from_private_components( n: BigNum, e: BigNum, @@ -895,11 +897,11 @@ let keypair = super::Rsa::generate(2048).unwrap(); let pubkey_pem = keypair.public_key_to_pem_pkcs1().unwrap(); let pubkey = super::Rsa::public_key_from_pem_pkcs1(&pubkey_pem).unwrap(); - let msg = "Hello, world!".as_bytes(); + let msg = b"Hello, world!"; let mut encrypted = vec![0; pubkey.size() as usize]; let len = pubkey - .public_encrypt(&msg, &mut encrypted, Padding::PKCS1) + .public_encrypt(msg, &mut encrypted, Padding::PKCS1) .unwrap(); assert!(len > msg.len()); let mut decrypted = vec![0; keypair.size() as usize]; @@ -907,7 +909,7 @@ .private_decrypt(&encrypted, &mut decrypted, Padding::PKCS1) .unwrap(); assert_eq!(len, msg.len()); - assert_eq!("Hello, world!", String::from_utf8_lossy(&decrypted[..len])); + assert_eq!(&decrypted[..len], msg); } #[test] @@ -915,15 +917,15 @@ let keypair = super::Rsa::generate(2048).unwrap(); let pubkey_pem = keypair.public_key_to_pem_pkcs1().unwrap(); let pubkey = super::Rsa::public_key_from_pem_pkcs1(&pubkey_pem).unwrap(); - let msg = "foo".as_bytes(); + let msg = b"foo"; let mut encrypted1 = vec![0; pubkey.size() as usize]; let mut encrypted2 = vec![0; pubkey.size() as usize]; let len1 = pubkey - .public_encrypt(&msg, &mut encrypted1, Padding::PKCS1) + .public_encrypt(msg, &mut encrypted1, Padding::PKCS1) .unwrap(); let len2 = pubkey - .public_encrypt(&msg, &mut encrypted2, Padding::PKCS1) + .public_encrypt(msg, &mut encrypted2, Padding::PKCS1) .unwrap(); assert!(len1 > (msg.len() + 1)); assert_eq!(len1, len2); @@ -931,6 +933,7 @@ } #[test] + #[allow(clippy::redundant_clone)] fn clone() { let key = Rsa::generate(2048).unwrap(); drop(key.clone()); diff -Nru cargo-0.44.1/vendor/openssl/src/sha.rs cargo-0.47.0/vendor/openssl/src/sha.rs --- cargo-0.44.1/vendor/openssl/src/sha.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/sha.rs 2020-10-01 21:38:28.000000000 +0000 @@ -118,6 +118,13 @@ #[derive(Clone)] pub struct Sha1(ffi::SHA_CTX); +impl Default for Sha1 { + #[inline] + fn default() -> Sha1 { + Sha1::new() + } +} + impl Sha1 { /// Creates a new hasher. #[inline] @@ -156,6 +163,13 @@ #[derive(Clone)] pub struct Sha224(ffi::SHA256_CTX); +impl Default for Sha224 { + #[inline] + fn default() -> Sha224 { + Sha224::new() + } +} + impl Sha224 { /// Creates a new hasher. #[inline] @@ -194,6 +208,13 @@ #[derive(Clone)] pub struct Sha256(ffi::SHA256_CTX); +impl Default for Sha256 { + #[inline] + fn default() -> Sha256 { + Sha256::new() + } +} + impl Sha256 { /// Creates a new hasher. #[inline] @@ -232,6 +253,13 @@ #[derive(Clone)] pub struct Sha384(ffi::SHA512_CTX); +impl Default for Sha384 { + #[inline] + fn default() -> Sha384 { + Sha384::new() + } +} + impl Sha384 { /// Creates a new hasher. 
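With the `Default` impls added above for the SHA hashers, `Sha256::default()` and friends behave exactly like `new()`, which helps generic code. A minimal sketch (helper name is illustrative):

    use openssl::sha::Sha256;

    fn sha256_of_chunks(chunks: &[&[u8]]) -> [u8; 32] {
        // Sha256 now implements Default, so it also works with mem::take,
        // derived Defaults on wrapper structs, and similar patterns.
        let mut hasher = Sha256::default();
        for chunk in chunks {
            hasher.update(chunk);
        }
        hasher.finish()
    }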
#[inline] @@ -270,6 +298,13 @@ #[derive(Clone)] pub struct Sha512(ffi::SHA512_CTX); +impl Default for Sha512 { + #[inline] + fn default() -> Sha512 { + Sha512::new() + } +} + impl Sha512 { /// Creates a new hasher. #[inline] diff -Nru cargo-0.44.1/vendor/openssl/src/sign.rs cargo-0.47.0/vendor/openssl/src/sign.rs --- cargo-0.44.1/vendor/openssl/src/sign.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/sign.rs 2020-10-01 21:38:28.000000000 +0000 @@ -123,6 +123,7 @@ } } +#[allow(clippy::len_without_is_empty)] impl<'a> Signer<'a> { /// Creates a new `Signer`. /// @@ -342,7 +343,7 @@ Ok(buf) } - /// Signs the data in data_buf and writes the siganture into the buffer sig_buf, returning the + /// Signs the data in data_buf and writes the signature into the buffer sig_buf, returning the /// number of bytes written. /// /// For PureEdDSA (Ed25519 and Ed448 keys) this is the only way to sign data. @@ -354,7 +355,11 @@ /// /// [`EVP_DigestSign`]: https://www.openssl.org/docs/man1.1.1/man3/EVP_DigestSign.html #[cfg(ossl111)] - pub fn sign_oneshot(&mut self, sig_buf: &mut [u8], data_buf: &[u8]) -> Result { + pub fn sign_oneshot( + &mut self, + sig_buf: &mut [u8], + data_buf: &[u8], + ) -> Result { unsafe { let mut sig_len = sig_buf.len(); cvt(ffi::EVP_DigestSign( @@ -641,12 +646,12 @@ use rsa::{Padding, Rsa}; use sign::{RsaPssSaltlen, Signer, Verifier}; - const INPUT: &'static str = + const INPUT: &str = "65794a68624763694f694a53557a49314e694a392e65794a7063334d694f694a71623255694c41304b49434a6c\ 654841694f6a457a4d4441344d546b7a4f44417344516f67496d6830644841364c79396c654746746347786c4c\ 6d4e76625339706331397962323930496a7030636e566c6651"; - const SIGNATURE: &'static str = + const SIGNATURE: &str = "702e218943e88fd11eb5d82dbf7845f34106ae1b81fff7731116add1717d83656d420afd3c96eedd73a2663e51\ 66687b000b87226e0187ed1073f945e582adfcef16d85a798ee8c66ddb3db8975b17d09402beedd5d9d9700710\ 8db28160d5f8040ca7445762b81fbe7ff9d92e0ae76f24f25b33bbe6f44ae61eb1040acb20044d3ef9128ed401\ diff -Nru cargo-0.44.1/vendor/openssl/src/srtp.rs cargo-0.47.0/vendor/openssl/src/srtp.rs --- cargo-0.44.1/vendor/openssl/src/srtp.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/srtp.rs 2020-10-01 21:38:28.000000000 +0000 @@ -38,20 +38,21 @@ pub struct SrtpProfileId(c_ulong); impl SrtpProfileId { + pub const SRTP_AES128_CM_SHA1_80: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_CM_SHA1_80); + pub const SRTP_AES128_CM_SHA1_32: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_CM_SHA1_32); + pub const SRTP_AES128_F8_SHA1_80: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_F8_SHA1_80); + pub const SRTP_AES128_F8_SHA1_32: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_F8_SHA1_32); + pub const SRTP_NULL_SHA1_80: SrtpProfileId = SrtpProfileId(ffi::SRTP_NULL_SHA1_80); + pub const SRTP_NULL_SHA1_32: SrtpProfileId = SrtpProfileId(ffi::SRTP_NULL_SHA1_32); + /// Creates a `SrtpProfileId` from an integer representation. pub fn from_raw(value: c_ulong) -> SrtpProfileId { SrtpProfileId(value) } /// Returns the integer representation of `SrtpProfileId`. 
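`sign_oneshot` (reformatted above) is the only signing path for PureEdDSA keys, since they take no message digest. A sketch using the `_to_vec` convenience wrapper, assuming an OpenSSL 1.1.1 build (helper name is illustrative):

    use openssl::error::ErrorStack;
    use openssl::pkey::PKey;
    use openssl::sign::{Signer, Verifier};

    fn ed25519_sign_and_verify(msg: &[u8]) -> Result<bool, ErrorStack> {
        let key = PKey::generate_ed25519()?;

        // Ed25519 uses no digest, so the signer/verifier are built without one.
        let mut signer = Signer::new_without_digest(&key)?;
        let signature = signer.sign_oneshot_to_vec(msg)?;

        let mut verifier = Verifier::new_without_digest(&key)?;
        verifier.verify_oneshot(&signature, msg)
    }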
+ #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_ulong { self.0 } - - pub const SRTP_AES128_CM_SHA1_80: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_CM_SHA1_80); - pub const SRTP_AES128_CM_SHA1_32: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_CM_SHA1_32); - pub const SRTP_AES128_F8_SHA1_80: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_F8_SHA1_80); - pub const SRTP_AES128_F8_SHA1_32: SrtpProfileId = SrtpProfileId(ffi::SRTP_AES128_F8_SHA1_32); - pub const SRTP_NULL_SHA1_80: SrtpProfileId = SrtpProfileId(ffi::SRTP_NULL_SHA1_80); - pub const SRTP_NULL_SHA1_32: SrtpProfileId = SrtpProfileId(ffi::SRTP_NULL_SHA1_32); } diff -Nru cargo-0.44.1/vendor/openssl/src/ssl/bio.rs cargo-0.47.0/vendor/openssl/src/ssl/bio.rs --- cargo-0.44.1/vendor/openssl/src/ssl/bio.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ssl/bio.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,12 +1,11 @@ use ffi::{ self, BIO_clear_retry_flags, BIO_new, BIO_set_retry_read, BIO_set_retry_write, BIO, - BIO_CTRL_FLUSH, BIO_CTRL_DGRAM_QUERY_MTU, + BIO_CTRL_DGRAM_QUERY_MTU, BIO_CTRL_FLUSH, }; use libc::{c_char, c_int, c_long, c_void, strlen}; use std::any::Any; use std::io; use std::io::prelude::*; -use std::mem; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::ptr; use std::slice; @@ -37,7 +36,7 @@ let method = BioMethod::new::(); let state = Box::new(StreamState { - stream: stream, + stream, error: None, panic: None, dtls_mtu_size: 0, @@ -48,7 +47,7 @@ BIO_set_data(bio, Box::into_raw(state) as *mut _); BIO_set_init(bio, 1); - return Ok((bio, method)); + Ok((bio, method)) } } @@ -63,7 +62,7 @@ } pub unsafe fn get_ref<'a, S: 'a>(bio: *mut BIO) -> &'a S { - let state: &'a StreamState = mem::transmute(BIO_get_data(bio)); + let state = &*(BIO_get_data(bio) as *const StreamState); &state.stream } @@ -73,7 +72,10 @@ pub unsafe fn set_dtls_mtu_size(bio: *mut BIO, mtu_size: usize) { if mtu_size as u64 > c_long::max_value() as u64 { - panic!("Given MTU size {} can't be represented in a positive `c_long` range") + panic!( + "Given MTU size {} can't be represented in a positive `c_long` range", + mtu_size + ) } state::(bio).dtls_mtu_size = mtu_size as c_long; } @@ -207,7 +209,7 @@ assert!(ffi::BIO_meth_set_ctrl(ptr, ctrl::) != 0); assert!(ffi::BIO_meth_set_create(ptr, create) != 0); assert!(ffi::BIO_meth_set_destroy(ptr, destroy::) != 0); - return ret; + ret } } diff -Nru cargo-0.44.1/vendor/openssl/src/ssl/callbacks.rs cargo-0.47.0/vendor/openssl/src/ssl/callbacks.rs --- cargo-0.44.1/vendor/openssl/src/ssl/callbacks.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ssl/callbacks.rs 2020-10-01 21:38:28.000000000 +0000 @@ -116,10 +116,10 @@ .ssl_context() .ex_data(callback_idx) .expect("BUG: psk callback missing") as *const F; - let identity = if identity != ptr::null() { - Some(CStr::from_ptr(identity).to_bytes()) - } else { + let identity = if identity.is_null() { None + } else { + Some(CStr::from_ptr(identity).to_bytes()) }; // Give the callback mutable slices into which it can write the psk. 
let psk_sl = slice::from_raw_parts_mut(psk as *mut u8, max_psk_len as usize); diff -Nru cargo-0.44.1/vendor/openssl/src/ssl/connector.rs cargo-0.47.0/vendor/openssl/src/ssl/connector.rs --- cargo-0.44.1/vendor/openssl/src/ssl/connector.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ssl/connector.rs 2020-10-01 21:38:28.000000000 +0000 @@ -20,6 +20,7 @@ -----END DH PARAMETERS----- "; +#[allow(clippy::inconsistent_digit_grouping)] fn ctx(method: SslMethod) -> Result { let mut ctx = SslContextBuilder::new(method)?; @@ -55,7 +56,7 @@ /// /// OpenSSL's built in hostname verification is used when linking against OpenSSL 1.0.2 or 1.1.0, /// and a custom implementation is used when linking against OpenSSL 1.0.1. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct SslConnector(SslContext); impl SslConnector { diff -Nru cargo-0.44.1/vendor/openssl/src/ssl/error.rs cargo-0.47.0/vendor/openssl/src/ssl/error.rs --- cargo-0.44.1/vendor/openssl/src/ssl/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ssl/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,14 +14,6 @@ pub struct ErrorCode(c_int); impl ErrorCode { - pub fn from_raw(raw: c_int) -> ErrorCode { - ErrorCode(raw) - } - - pub fn as_raw(&self) -> c_int { - self.0 - } - /// The SSL session has been closed. pub const ZERO_RETURN: ErrorCode = ErrorCode(ffi::SSL_ERROR_ZERO_RETURN); @@ -46,6 +38,15 @@ /// Requires OpenSSL 1.1.1 or newer. #[cfg(ossl111)] pub const WANT_CLIENT_HELLO_CB: ErrorCode = ErrorCode(ffi::SSL_ERROR_WANT_CLIENT_HELLO_CB); + + pub fn from_raw(raw: c_int) -> ErrorCode { + ErrorCode(raw) + } + + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn as_raw(&self) -> c_int { + self.0 + } } #[derive(Debug)] diff -Nru cargo-0.44.1/vendor/openssl/src/ssl/mod.rs cargo-0.47.0/vendor/openssl/src/ssl/mod.rs --- cargo-0.44.1/vendor/openssl/src/ssl/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ssl/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -338,11 +338,16 @@ } /// Constructs an `SslMethod` from a pointer to the underlying OpenSSL value. + /// + /// # Safety + /// + /// The caller must ensure the pointer is valid. pub unsafe fn from_ptr(ptr: *const ffi::SSL_METHOD) -> SslMethod { SslMethod(ptr) } /// Returns a pointer to the underlying OpenSSL value. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_ptr(&self) -> *const ffi::SSL_METHOD { self.0 } @@ -444,16 +449,6 @@ pub struct SslFiletype(c_int); impl SslFiletype { - /// Constructs an `SslFiletype` from a raw OpenSSL value. - pub fn from_raw(raw: c_int) -> SslFiletype { - SslFiletype(raw) - } - - /// Returns the raw OpenSSL value represented by this type. - pub fn as_raw(&self) -> c_int { - self.0 - } - /// The PEM format. /// /// This corresponds to `SSL_FILETYPE_PEM`. @@ -463,6 +458,17 @@ /// /// This corresponds to `SSL_FILETYPE_ASN1`. pub const ASN1: SslFiletype = SslFiletype(ffi::SSL_FILETYPE_ASN1); + + /// Constructs an `SslFiletype` from a raw OpenSSL value. + pub fn from_raw(raw: c_int) -> SslFiletype { + SslFiletype(raw) + } + + /// Returns the raw OpenSSL value represented by this type. + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn as_raw(&self) -> c_int { + self.0 + } } /// An identifier of a certificate status type. @@ -470,18 +476,19 @@ pub struct StatusType(c_int); impl StatusType { + /// An OSCP status. + pub const OCSP: StatusType = StatusType(ffi::TLSEXT_STATUSTYPE_ocsp); + /// Constructs a `StatusType` from a raw OpenSSL value. 
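Since `SslConnector` now derives `Debug` (connector.rs hunk above), it can be logged or embedded in types that themselves derive `Debug`. A trivial sketch (helper name is illustrative):

    use openssl::error::ErrorStack;
    use openssl::ssl::{SslConnector, SslMethod};

    fn build_and_log_connector() -> Result<SslConnector, ErrorStack> {
        let connector = SslConnector::builder(SslMethod::tls())?.build();
        // Previously this needed a manual Debug impl or skipping the field.
        println!("using connector: {:?}", connector);
        Ok(connector)
    }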
pub fn from_raw(raw: c_int) -> StatusType { StatusType(raw) } /// Returns the raw OpenSSL value represented by this type. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } - - /// An OSCP status. - pub const OCSP: StatusType = StatusType(ffi::TLSEXT_STATUSTYPE_ocsp); } /// An identifier of a session name type. @@ -489,18 +496,19 @@ pub struct NameType(c_int); impl NameType { + /// A host name. + pub const HOST_NAME: NameType = NameType(ffi::TLSEXT_NAMETYPE_host_name); + /// Constructs a `StatusType` from a raw OpenSSL value. pub fn from_raw(raw: c_int) -> StatusType { StatusType(raw) } /// Returns the raw OpenSSL value represented by this type. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } - - /// A host name. - pub const HOST_NAME: NameType = NameType(ffi::TLSEXT_NAMETYPE_host_name); } lazy_static! { @@ -658,6 +666,10 @@ } /// Creates an `SslContextBuilder` from a pointer to a raw OpenSSL value. + /// + /// # Safety + /// + /// The caller must ensure that the pointer is valid and uniquely owned by the builder. pub unsafe fn from_ptr(ctx: *mut ffi::SSL_CTX) -> SslContextBuilder { SslContextBuilder(SslContext::from_ptr(ctx)) } @@ -1726,6 +1738,7 @@ /// This corresponds to [`SSL_CTX_sess_get_cache_size`]. /// /// [`SSL_CTX_sess_get_cache_size`]: https://www.openssl.org/docs/man1.0.2/man3/SSL_CTX_sess_set_cache_size.html + #[allow(clippy::identity_conversion)] pub fn set_session_cache_size(&mut self, size: i32) -> i64 { unsafe { ffi::SSL_CTX_sess_set_cache_size(self.as_ptr(), size.into()).into() } } @@ -1967,6 +1980,7 @@ /// This corresponds to [`SSL_CTX_sess_get_cache_size`]. /// /// [`SSL_CTX_sess_get_cache_size`]: https://www.openssl.org/docs/man1.0.2/man3/SSL_CTX_sess_set_cache_size.html + #[allow(clippy::identity_conversion)] pub fn session_cache_size(&self) -> i64 { unsafe { ffi::SSL_CTX_sess_get_cache_size(self.as_ptr()).into() } } @@ -2084,6 +2098,7 @@ /// This corresponds to [`SSL_CIPHER_get_bits`]. /// /// [`SSL_CIPHER_get_bits`]: https://www.openssl.org/docs/manmaster/man3/SSL_CIPHER_get_name.html + #[allow(clippy::identity_conversion)] pub fn bits(&self) -> CipherBits { unsafe { let mut algo_bits = 0; @@ -2242,6 +2257,7 @@ /// This corresponds to [`SSL_SESSION_get_time`]. /// /// [`SSL_SESSION_get_time`]: https://www.openssl.org/docs/man1.1.1/man3/SSL_SESSION_get_time.html + #[allow(clippy::identity_conversion)] pub fn time(&self) -> i64 { unsafe { ffi::SSL_SESSION_get_time(self.as_ptr()).into() } } @@ -2253,6 +2269,7 @@ /// This corresponds to [`SSL_SESSION_get_timeout`]. /// /// [`SSL_SESSION_get_timeout`]: https://www.openssl.org/docs/man1.1.1/man3/SSL_SESSION_get_time.html + #[allow(clippy::identity_conversion)] pub fn timeout(&self) -> i64 { unsafe { ffi::SSL_SESSION_get_timeout(self.as_ptr()).into() } } @@ -2868,7 +2885,7 @@ pub fn servername_raw(&self, type_: NameType) -> Option<&[u8]> { unsafe { let name = ffi::SSL_get_servername(self.as_ptr(), type_.0); - if name == ptr::null() { + if name.is_null() { None } else { Some(CStr::from_ptr(name as *const _).to_bytes()) @@ -3212,7 +3229,7 @@ /// The total size of the message is returned, so this can be used to determine the size of the /// buffer required. /// - /// This corresponds to `SSL_get_finished`. + /// This corresponds to `SSL_get_peer_finished`. 
pub fn peer_finished(&self, buf: &mut [u8]) -> usize { unsafe { ffi::SSL_get_peer_finished(self.as_ptr(), buf.as_mut_ptr() as *mut c_void, buf.len()) @@ -3351,6 +3368,13 @@ } } } + + /// Sets the MTU used for DTLS connections. + /// + /// This corresponds to `SSL_set_mtu`. + pub fn set_mtu(&mut self, mtu: u32) -> Result<(), ErrorStack> { + unsafe { cvt(ffi::SSL_set_mtu(self.as_ptr(), mtu as c_long) as c_int).map(|_| ()) } + } } /// An SSL stream midway through the handshake process. @@ -3450,6 +3474,18 @@ } } + /// Constructs an `SslStream` from a pointer to the underlying OpenSSL `SSL` struct. + /// + /// This is useful if the handshake has already been completed elsewhere. + /// + /// # Safety + /// + /// The caller must ensure the pointer is valid. + pub unsafe fn from_raw_parts(ssl: *mut ffi::SSL, stream: S) -> Self { + let ssl = Ssl::from_ptr(ssl); + Self::new_base(ssl, stream) + } + /// Like `read`, but returns an `ssl::Error` rather than an `io::Error`. /// /// It is particularly useful with a nonblocking socket, where the error value will identify if @@ -3464,7 +3500,7 @@ // that it read zero bytes, but zero is also the sentinel for "error". // To avoid that confusion short-circuit that logic and return quickly // if `buf` has a length of zero. - if buf.len() == 0 { + if buf.is_empty() { return Ok(0); } @@ -3486,7 +3522,7 @@ /// [`SSL_write`]: https://www.openssl.org/docs/manmaster/man3/SSL_write.html pub fn ssl_write(&mut self, buf: &[u8]) -> Result { // See above for why we short-circuit on zero-length buffers - if buf.len() == 0 { + if buf.is_empty() { return Ok(0); } @@ -3875,6 +3911,7 @@ /// /// # Panics /// This function panics if the given mtu size can't be represented in a positive `c_long` range + #[deprecated(note = "Use SslRef::set_mtu instead", since = "0.10.30")] pub fn set_dtls_mtu_size(&mut self, mtu_size: usize) { unsafe { let bio = self.inner.ssl.get_raw_rbio(); diff -Nru cargo-0.44.1/vendor/openssl/src/ssl/test/mod.rs cargo-0.47.0/vendor/openssl/src/ssl/test/mod.rs --- cargo-0.44.1/vendor/openssl/src/ssl/test/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/ssl/test/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -41,9 +41,9 @@ mod server; -static ROOT_CERT: &'static [u8] = include_bytes!("../../../test/root-ca.pem"); -static CERT: &'static [u8] = include_bytes!("../../../test/cert.pem"); -static KEY: &'static [u8] = include_bytes!("../../../test/key.pem"); +static ROOT_CERT: &[u8] = include_bytes!("../../../test/root-ca.pem"); +static CERT: &[u8] = include_bytes!("../../../test/cert.pem"); +static KEY: &[u8] = include_bytes!("../../../test/key.pem"); #[test] fn verify_untrusted() { @@ -321,10 +321,9 @@ .unwrap(); ctx.set_private_key_file(&Path::new("test/key.pem"), SslFiletype::PEM) .unwrap(); - let ssl = Ssl::new(&ctx.build()).unwrap(); - let mut builder = SslStreamBuilder::new(ssl, stream); - builder.set_dtls_mtu_size(1500); - let mut stream = builder.accept().unwrap(); + let mut ssl = Ssl::new(&ctx.build()).unwrap(); + ssl.set_mtu(1500).unwrap(); + let mut stream = ssl.accept(stream).unwrap(); let mut buf = [0; 60]; stream @@ -341,10 +340,9 @@ let mut ctx = SslContext::builder(SslMethod::dtls()).unwrap(); ctx.set_tlsext_use_srtp("SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32") .unwrap(); - let ssl = Ssl::new(&ctx.build()).unwrap(); - let mut builder = SslStreamBuilder::new(ssl, stream); - builder.set_dtls_mtu_size(1500); - let mut stream = builder.connect().unwrap(); + let mut ssl = Ssl::new(&ctx.build()).unwrap(); + 
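The new `SslRef::set_mtu` above supersedes `SslStreamBuilder::set_dtls_mtu_size` (now deprecated), as the updated DTLS tests show. A minimal sketch of the new flow (helper name is illustrative):

    use openssl::error::ErrorStack;
    use openssl::ssl::{Ssl, SslContext, SslMethod};

    fn dtls_ssl_with_mtu() -> Result<Ssl, ErrorStack> {
        let ctx = SslContext::builder(SslMethod::dtls())?.build();
        let mut ssl = Ssl::new(&ctx)?;
        // Set the DTLS MTU directly on the Ssl before calling connect() or accept().
        ssl.set_mtu(1500)?;
        Ok(ssl)
    }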
ssl.set_mtu(1500).unwrap(); + let mut stream = ssl.connect(stream).unwrap(); let mut buf = [1; 60]; { @@ -385,7 +383,7 @@ .unwrap(); let mut profilenames = String::new(); for profile in ssl.srtp_profiles().unwrap() { - if profilenames.len() > 0 { + if !profilenames.is_empty() { profilenames.push(':'); } profilenames += profile.name(); @@ -394,9 +392,8 @@ "SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32", profilenames ); - let mut builder = SslStreamBuilder::new(ssl, stream); - builder.set_dtls_mtu_size(1500); - let mut stream = builder.accept().unwrap(); + ssl.set_mtu(1500).unwrap(); + let mut stream = ssl.accept(stream).unwrap(); let mut buf = [0; 60]; stream @@ -414,9 +411,8 @@ let mut ssl = Ssl::new(&ctx.build()).unwrap(); ssl.set_tlsext_use_srtp("SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32") .unwrap(); - let mut builder = SslStreamBuilder::new(ssl, stream); - builder.set_dtls_mtu_size(1500); - let mut stream = builder.connect().unwrap(); + ssl.set_mtu(1500).unwrap(); + let mut stream = ssl.connect(stream).unwrap(); let mut buf = [1; 60]; { @@ -603,6 +599,7 @@ #[test] #[cfg_attr(libressl250, ignore)] +#[cfg_attr(target_os = "windows", ignore)] #[cfg_attr(all(target_os = "macos", feature = "vendored"), ignore)] fn default_verify_paths() { let mut ctx = SslContext::builder(SslMethod::tls()).unwrap(); @@ -1310,7 +1307,7 @@ #[cfg(not(osslconf = "OPENSSL_NO_PSK"))] #[test] fn psk_ciphers() { - const CIPHER: &'static str = "PSK-AES128-CBC-SHA"; + const CIPHER: &str = "PSK-AES128-CBC-SHA"; const PSK: &[u8] = b"thisisaverysecurekey"; const CLIENT_IDENT: &[u8] = b"thisisaclient"; static CLIENT_CALLED: AtomicBool = AtomicBool::new(false); diff -Nru cargo-0.44.1/vendor/openssl/src/stack.rs cargo-0.47.0/vendor/openssl/src/stack.rs --- cargo-0.44.1/vendor/openssl/src/stack.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/stack.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,6 +3,7 @@ use libc::c_int; use std::borrow::Borrow; use std::convert::AsRef; +use std::fmt; use std::iter; use std::marker::PhantomData; use std::mem; @@ -43,10 +44,19 @@ unsafe impl Send for Stack {} unsafe impl Sync for Stack {} +impl fmt::Debug for Stack +where + T: Stackable, + T::Ref: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_list().entries(self).finish() + } +} impl Drop for Stack { fn drop(&mut self) { unsafe { - while let Some(_) = self.pop() {} + while self.pop().is_some() {} OPENSSL_sk_free(self.0 as *mut _); } } @@ -178,11 +188,16 @@ self.as_ptr() as *mut _ } - /// Returns the number of items in the stack + /// Returns the number of items in the stack. pub fn len(&self) -> usize { unsafe { OPENSSL_sk_num(self.as_stack()) as usize } } + /// Determines if the stack is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn iter(&self) -> Iter { Iter { stack: self, diff -Nru cargo-0.44.1/vendor/openssl/src/symm.rs cargo-0.47.0/vendor/openssl/src/symm.rs --- cargo-0.44.1/vendor/openssl/src/symm.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/symm.rs 2020-10-01 21:38:28.000000000 +0000 @@ -130,6 +130,12 @@ unsafe { Cipher(ffi::EVP_aes_128_ofb()) } } + /// Requires OpenSSL 1.1.0 or newer. + #[cfg(ossl110)] + pub fn aes_128_ocb() -> Cipher { + unsafe { Cipher(ffi::EVP_aes_128_ocb()) } + } + pub fn aes_192_ecb() -> Cipher { unsafe { Cipher(ffi::EVP_aes_192_ecb()) } } @@ -166,6 +172,12 @@ unsafe { Cipher(ffi::EVP_aes_192_ofb()) } } + /// Requires OpenSSL 1.1.0 or newer. 
+ #[cfg(ossl110)] + pub fn aes_192_ocb() -> Cipher { + unsafe { Cipher(ffi::EVP_aes_192_ocb()) } + } + pub fn aes_256_ecb() -> Cipher { unsafe { Cipher(ffi::EVP_aes_256_ecb()) } } @@ -206,6 +218,12 @@ unsafe { Cipher(ffi::EVP_aes_256_ofb()) } } + /// Requires OpenSSL 1.1.0 or newer. + #[cfg(ossl110)] + pub fn aes_256_ocb() -> Cipher { + unsafe { Cipher(ffi::EVP_aes_256_ocb()) } + } + pub fn bf_cbc() -> Cipher { unsafe { Cipher(ffi::EVP_bf_cbc()) } } @@ -258,21 +276,29 @@ unsafe { Cipher(ffi::EVP_chacha20_poly1305()) } } + /// Creates a `Cipher` from a raw pointer to its OpenSSL type. + /// + /// # Safety + /// + /// The caller must ensure the pointer is valid for the `'static` lifetime. pub unsafe fn from_ptr(ptr: *const ffi::EVP_CIPHER) -> Cipher { Cipher(ptr) } + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_ptr(&self) -> *const ffi::EVP_CIPHER { self.0 } /// Returns the length of keys used with this cipher. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn key_len(&self) -> usize { unsafe { EVP_CIPHER_key_length(self.0) as usize } } /// Returns the length of the IV used with this cipher, or `None` if the /// cipher does not use an IV. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn iv_len(&self) -> Option { unsafe { let len = EVP_CIPHER_iv_length(self.0) as usize; @@ -289,14 +315,28 @@ /// # Note /// /// Stream ciphers such as RC4 have a block size of 1. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn block_size(&self) -> usize { unsafe { EVP_CIPHER_block_size(self.0) as usize } } /// Determines whether the cipher is using CCM mode - fn is_ccm(&self) -> bool { + fn is_ccm(self) -> bool { // NOTE: OpenSSL returns pointers to static structs, which makes this work as expected - *self == Cipher::aes_128_ccm() || *self == Cipher::aes_256_ccm() + self == Cipher::aes_128_ccm() || self == Cipher::aes_256_ccm() + } + + /// Determines whether the cipher is using OCB mode + #[cfg(ossl110)] + fn is_ocb(self) -> bool { + self == Cipher::aes_128_ocb() + || self == Cipher::aes_192_ocb() + || self == Cipher::aes_256_ocb() + } + + #[cfg(not(ossl110))] + const fn is_ocb(self) -> bool { + false } } @@ -390,7 +430,7 @@ unsafe { let ctx = cvt_p(ffi::EVP_CIPHER_CTX_new())?; let crypter = Crypter { - ctx: ctx, + ctx, block_size: t.block_size(), }; @@ -736,9 +776,12 @@ let mut c = Crypter::new(t, Mode::Encrypt, key, iv)?; let mut out = vec![0; data.len() + t.block_size()]; - if t.is_ccm() { + let is_ccm = t.is_ccm(); + if is_ccm || t.is_ocb() { c.set_tag_len(tag.len())?; - c.set_data_len(data.len())?; + if is_ccm { + c.set_data_len(data.len())?; + } } c.aad_update(aad)?; @@ -764,19 +807,23 @@ let mut c = Crypter::new(t, Mode::Decrypt, key, iv)?; let mut out = vec![0; data.len() + t.block_size()]; - if t.is_ccm() { + let is_ccm = t.is_ccm(); + if is_ccm || t.is_ocb() { c.set_tag(tag)?; - c.set_data_len(data.len())?; + if is_ccm { + c.set_data_len(data.len())?; + } } c.aad_update(aad)?; let count = c.update(data, &mut out)?; - let mut rest = 0; - if !t.is_ccm() { + let rest = if t.is_ccm() { + 0 + } else { c.set_tag(tag)?; - rest = c.finalize(&mut out[count..])?; - } + c.finalize(&mut out[count..])? 
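With OCB handling added to `encrypt_aead`/`decrypt_aead` above, the AES-OCB ciphers work like the other AEAD modes. A round-trip sketch, assuming an OpenSSL 1.1.0+ build; the key, nonce, and messages are arbitrary sample values:

    use openssl::error::ErrorStack;
    use openssl::symm::{decrypt_aead, encrypt_aead, Cipher};

    fn aes_128_ocb_roundtrip() -> Result<(), ErrorStack> {
        let key = b"0123456789abcdef"; // 16-byte key for AES-128
        let nonce = b"0123456789ab"; // 12-byte OCB nonce
        let aad = b"associated data";
        let msg = b"attack at dawn";

        let mut tag = [0u8; 16];
        let ciphertext =
            encrypt_aead(Cipher::aes_128_ocb(), key, Some(&nonce[..]), aad, msg, &mut tag)?;
        let plaintext =
            decrypt_aead(Cipher::aes_128_ocb(), key, Some(&nonce[..]), aad, &ciphertext, &tag)?;

        assert_eq!(&plaintext[..], &msg[..]);
        Ok(())
    }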
+ }; out.truncate(count + rest); Ok(out) @@ -1383,6 +1430,62 @@ &Vec::from_hex(aad).unwrap(), &Vec::from_hex(ct).unwrap(), &Vec::from_hex(tag).unwrap(), + ); + assert!(out.is_err()); + } + + #[test] + #[cfg(ossl110)] + fn test_aes_128_ocb() { + let key = "000102030405060708090a0b0c0d0e0f"; + let aad = "0001020304050607"; + let tag = "16dc76a46d47e1ead537209e8a96d14e"; + let iv = "000102030405060708090a0b"; + let pt = "0001020304050607"; + let ct = "92b657130a74b85a"; + + let mut actual_tag = [0; 16]; + let out = encrypt_aead( + Cipher::aes_128_ocb(), + &Vec::from_hex(key).unwrap(), + Some(&Vec::from_hex(iv).unwrap()), + &Vec::from_hex(aad).unwrap(), + &Vec::from_hex(pt).unwrap(), + &mut actual_tag, + ) + .unwrap(); + + assert_eq!(ct, hex::encode(out)); + assert_eq!(tag, hex::encode(actual_tag)); + + let out = decrypt_aead( + Cipher::aes_128_ocb(), + &Vec::from_hex(key).unwrap(), + Some(&Vec::from_hex(iv).unwrap()), + &Vec::from_hex(aad).unwrap(), + &Vec::from_hex(ct).unwrap(), + &Vec::from_hex(tag).unwrap(), + ) + .unwrap(); + assert_eq!(pt, hex::encode(out)); + } + + #[test] + #[cfg(ossl110)] + fn test_aes_128_ocb_fail() { + let key = "000102030405060708090a0b0c0d0e0f"; + let aad = "0001020304050607"; + let tag = "16dc76a46d47e1ead537209e8a96d14e"; + let iv = "000000000405060708090a0b"; + let ct = "92b657130a74b85a"; + + let out = decrypt_aead( + Cipher::aes_128_ocb(), + &Vec::from_hex(key).unwrap(), + Some(&Vec::from_hex(iv).unwrap()), + &Vec::from_hex(aad).unwrap(), + &Vec::from_hex(ct).unwrap(), + &Vec::from_hex(tag).unwrap(), ); assert!(out.is_err()); } diff -Nru cargo-0.44.1/vendor/openssl/src/x509/extension.rs cargo-0.47.0/vendor/openssl/src/x509/extension.rs --- cargo-0.44.1/vendor/openssl/src/x509/extension.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/x509/extension.rs 2020-10-01 21:38:28.000000000 +0000 @@ -33,6 +33,12 @@ pathlen: Option, } +impl Default for BasicConstraints { + fn default() -> BasicConstraints { + BasicConstraints::new() + } +} + impl BasicConstraints { /// Construct a new `BasicConstraints` extension. pub fn new() -> BasicConstraints { @@ -95,6 +101,12 @@ decipher_only: bool, } +impl Default for KeyUsage { + fn default() -> KeyUsage { + KeyUsage::new() + } +} + impl KeyUsage { /// Construct a new `KeyUsage` extension. pub fn new() -> KeyUsage { @@ -228,6 +240,12 @@ other: Vec, } +impl Default for ExtendedKeyUsage { + fn default() -> ExtendedKeyUsage { + ExtendedKeyUsage::new() + } +} + impl ExtendedKeyUsage { /// Construct a new `ExtendedKeyUsage` extension. pub fn new() -> ExtendedKeyUsage { @@ -354,6 +372,12 @@ critical: bool, } +impl Default for SubjectKeyIdentifier { + fn default() -> SubjectKeyIdentifier { + SubjectKeyIdentifier::new() + } +} + impl SubjectKeyIdentifier { /// Construct a new `SubjectKeyIdentifier` extension. pub fn new() -> SubjectKeyIdentifier { @@ -384,6 +408,12 @@ issuer: Option, } +impl Default for AuthorityKeyIdentifier { + fn default() -> AuthorityKeyIdentifier { + AuthorityKeyIdentifier::new() + } +} + impl AuthorityKeyIdentifier { /// Construct a new `AuthorityKeyIdentifier` extension. pub fn new() -> AuthorityKeyIdentifier { @@ -438,6 +468,12 @@ names: Vec, } +impl Default for SubjectAlternativeName { + fn default() -> SubjectAlternativeName { + SubjectAlternativeName::new() + } +} + impl SubjectAlternativeName { /// Construct a new `SubjectAlternativeName` extension. 
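The `Default` impls added above mirror `new()` on each extension builder; the usual builder-chain usage is unchanged. A brief sketch producing extensions for a CA certificate (helper name is illustrative):

    use openssl::error::ErrorStack;
    use openssl::x509::extension::{BasicConstraints, KeyUsage};
    use openssl::x509::X509Extension;

    fn ca_extensions() -> Result<(X509Extension, X509Extension), ErrorStack> {
        // Default::default() is now equivalent to new() for these builders.
        let basic = BasicConstraints::default().critical().ca().build()?;
        let usage = KeyUsage::default()
            .critical()
            .key_cert_sign()
            .crl_sign()
            .build()?;
        Ok((basic, usage))
    }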
pub fn new() -> SubjectAlternativeName { diff -Nru cargo-0.44.1/vendor/openssl/src/x509/mod.rs cargo-0.47.0/vendor/openssl/src/x509/mod.rs --- cargo-0.44.1/vendor/openssl/src/x509/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/x509/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -68,7 +68,7 @@ pub fn new() -> Result { unsafe { ffi::init(); - cvt_p(ffi::X509_STORE_CTX_new()).map(|p| X509StoreContext(p)) + cvt_p(ffi::X509_STORE_CTX_new()).map(X509StoreContext) } } } @@ -671,6 +671,35 @@ } } +impl fmt::Debug for X509 { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let serial = match &self.serial_number().to_bn() { + Ok(bn) => match bn.to_hex_str() { + Ok(hex) => hex.to_string(), + Err(_) => "".to_string(), + }, + Err(_) => "".to_string(), + }; + let mut debug_struct = formatter.debug_struct("X509"); + debug_struct.field("serial_number", &serial); + debug_struct.field("signature_algorithm", &self.signature_algorithm().object()); + debug_struct.field("issuer", &self.issuer_name()); + debug_struct.field("subject", &self.subject_name()); + if let Some(subject_alt_names) = &self.subject_alt_names() { + debug_struct.field("subject_alt_names", subject_alt_names); + } + debug_struct.field("not_before", &self.not_before()); + debug_struct.field("not_after", &self.not_after()); + + if let Ok(public_key) = &self.public_key() { + debug_struct.field("public_key", public_key); + }; + // TODO: Print extensions once they are supported on the X509 struct. + + debug_struct.finish() + } +} + impl AsRef for X509Ref { fn as_ref(&self) -> &X509Ref { self @@ -849,7 +878,7 @@ impl X509NameRef { /// Returns the name entries by the nid. - pub fn entries_by_nid<'a>(&'a self, nid: Nid) -> X509NameEntries<'a> { + pub fn entries_by_nid(&self, nid: Nid) -> X509NameEntries<'_> { X509NameEntries { name: self, nid: Some(nid), @@ -858,7 +887,7 @@ } /// Returns an iterator over all `X509NameEntry` values - pub fn entries<'a>(&'a self) -> X509NameEntries<'a> { + pub fn entries(&self) -> X509NameEntries<'_> { X509NameEntries { name: self, nid: None, @@ -867,6 +896,12 @@ } } +impl fmt::Debug for X509NameRef { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.debug_list().entries(self.entries()).finish() + } +} + /// A type to destructure and examine an `X509Name`. pub struct X509NameEntries<'a> { name: &'a X509NameRef, @@ -942,6 +977,12 @@ } } +impl fmt::Debug for X509NameEntryRef { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_fmt(format_args!("{:?} = {:?}", self.object(), self.data())) + } +} + /// A builder used to construct an `X509Req`. pub struct X509ReqBuilder(X509Req); @@ -1213,6 +1254,7 @@ } /// Return the integer representation of an `X509VerifyResult`. + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> c_int { self.0 } @@ -1222,6 +1264,7 @@ /// This corresponds to [`X509_verify_cert_error_string`]. 
/// /// [`X509_verify_cert_error_string`]: https://www.openssl.org/docs/man1.1.0/crypto/X509_verify_cert_error_string.html + #[allow(clippy::trivially_copy_pass_by_ref)] pub fn error_string(&self) -> &'static str { ffi::init(); @@ -1295,6 +1338,23 @@ } } } + +impl fmt::Debug for GeneralNameRef { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if let Some(email) = self.email() { + formatter.write_str(email) + } else if let Some(dnsname) = self.dnsname() { + formatter.write_str(dnsname) + } else if let Some(uri) = self.uri() { + formatter.write_str(uri) + } else if let Some(ipaddress) = self.ipaddress() { + let result = String::from_utf8_lossy(ipaddress); + formatter.write_str(&result) + } else { + formatter.write_str("(empty)") + } + } +} impl Stackable for GeneralName { type StackType = ffi::stack_st_GENERAL_NAME; diff -Nru cargo-0.44.1/vendor/openssl/src/x509/tests.rs cargo-0.47.0/vendor/openssl/src/x509/tests.rs --- cargo-0.44.1/vendor/openssl/src/x509/tests.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl/src/x509/tests.rs 2020-10-01 21:38:28.000000000 +0000 @@ -22,7 +22,7 @@ #[test] fn test_cert_loading() { let cert = include_bytes!("../../test/cert.pem"); - let cert = X509::from_pem(cert).ok().expect("Failed to load PEM"); + let cert = X509::from_pem(cert).unwrap(); let fingerprint = cert.digest(MessageDigest::sha1()).unwrap(); let hash_str = "59172d9313e84459bcff27f967e79e6e9217e584"; @@ -32,9 +32,22 @@ } #[test] +fn test_debug() { + let cert = include_bytes!("../../test/cert.pem"); + let cert = X509::from_pem(cert).unwrap(); + let debugged = format!("{:#?}", cert); + assert!(debugged.contains(r#"serial_number: "8771F7BDEE982FA5""#)); + assert!(debugged.contains(r#"signature_algorithm: sha256WithRSAEncryption"#)); + assert!(debugged.contains(r#"countryName = "AU""#)); + assert!(debugged.contains(r#"stateOrProvinceName = "Some-State""#)); + assert!(debugged.contains(r#"not_before: Aug 14 17:00:03 2016 GMT"#)); + assert!(debugged.contains(r#"not_after: Aug 12 17:00:03 2026 GMT"#)); +} + +#[test] fn test_cert_issue_validity() { let cert = include_bytes!("../../test/cert.pem"); - let cert = X509::from_pem(cert).ok().expect("Failed to load PEM"); + let cert = X509::from_pem(cert).unwrap(); let not_before = cert.not_before().to_string(); let not_after = cert.not_after().to_string(); @@ -45,7 +58,7 @@ #[test] fn test_save_der() { let cert = include_bytes!("../../test/cert.pem"); - let cert = X509::from_pem(cert).ok().expect("Failed to load PEM"); + let cert = X509::from_pem(cert).unwrap(); let der = cert.to_der().unwrap(); assert!(!der.is_empty()); @@ -101,8 +114,8 @@ assert_eq!(friendly.object().nid().as_raw(), Nid::FRIENDLYNAME.as_raw()); assert_eq!(&**friendly.data().as_utf8().unwrap(), "Example"); - if let Some(_) = all_entries.next() { - assert!(false); + if all_entries.next().is_some() { + panic!(); } } @@ -136,7 +149,7 @@ #[test] fn test_subject_alt_name_iter() { let cert = include_bytes!("../../test/alt_name_cert.pem"); - let cert = X509::from_pem(cert).ok().expect("Failed to load PEM"); + let cert = X509::from_pem(cert).unwrap(); let subject_alt_names = cert.subject_alt_names().unwrap(); let mut subject_alt_names_iter = subject_alt_names.iter(); @@ -232,7 +245,7 @@ .entries_by_nid(Nid::COMMONNAME) .next() .unwrap(); - assert_eq!("foobar.com".as_bytes(), cn.data().as_slice()); + assert_eq!(cn.data().as_slice(), b"foobar.com"); assert_eq!(serial, x509.serial_number().to_bn().unwrap()); } @@ -320,6 +333,7 @@ } #[test] 
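The `Debug` impls added above make certificates and their name entries printable directly, which is what the new `test_debug` above exercises. A sketch, assuming any PEM certificate on disk (the path argument and helper name are illustrative):

    use openssl::x509::X509;

    fn dump_certificate(pem_path: &str) -> Result<(), Box<dyn std::error::Error>> {
        let pem = std::fs::read(pem_path)?;
        let cert = X509::from_pem(&pem)?;
        // Pretty-prints the serial number, signature algorithm, issuer, subject,
        // subject alternative names, and the validity window.
        println!("{:#?}", cert);
        Ok(())
    }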
+#[allow(clippy::redundant_clone)] fn clone_x509() { let cert = include_bytes!("../../test/cert.pem"); let cert = X509::from_pem(cert).unwrap(); diff -Nru cargo-0.44.1/vendor/openssl-sys/build/cfgs.rs cargo-0.47.0/vendor/openssl-sys/build/cfgs.rs --- cargo-0.44.1/vendor/openssl-sys/build/cfgs.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/build/cfgs.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,4 @@ +#[allow(clippy::inconsistent_digit_grouping)] pub fn get(openssl_version: Option, libressl_version: Option) -> Vec<&'static str> { let mut cfgs = vec![]; diff -Nru cargo-0.44.1/vendor/openssl-sys/build/find_normal.rs cargo-0.47.0/vendor/openssl-sys/build/find_normal.rs --- cargo-0.44.1/vendor/openssl-sys/build/find_normal.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/build/find_normal.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,14 +9,15 @@ let lib_dir = env("OPENSSL_LIB_DIR").map(PathBuf::from); let include_dir = env("OPENSSL_INCLUDE_DIR").map(PathBuf::from); - if lib_dir.is_none() || include_dir.is_none() { - let openssl_dir = env("OPENSSL_DIR").unwrap_or_else(|| find_openssl_dir(&target)); - let openssl_dir = Path::new(&openssl_dir); - let lib_dir = lib_dir.unwrap_or_else(|| openssl_dir.join("lib")); - let include_dir = include_dir.unwrap_or_else(|| openssl_dir.join("include")); - (lib_dir, include_dir) - } else { - (lib_dir.unwrap(), include_dir.unwrap()) + match (lib_dir, include_dir) { + (Some(lib_dir), Some(include_dir)) => (lib_dir, include_dir), + (lib_dir, include_dir) => { + let openssl_dir = env("OPENSSL_DIR").unwrap_or_else(|| find_openssl_dir(&target)); + let openssl_dir = Path::new(&openssl_dir); + let lib_dir = lib_dir.unwrap_or_else(|| openssl_dir.join("lib")); + let include_dir = include_dir.unwrap_or_else(|| openssl_dir.join("include")); + (lib_dir, include_dir) + } } } @@ -93,7 +94,7 @@ if host.contains("apple-darwin") && target.contains("apple-darwin") { let system = Path::new("/usr/lib/libssl.0.9.8.dylib"); if system.exists() { - msg.push_str(&format!( + msg.push_str( " It looks like you're compiling on macOS, where the system contains a version of @@ -105,27 +106,28 @@ Unfortunately though the compile cannot continue, so aborting. -" - )); +", + ); } } - if host.contains("unknown-linux") && target.contains("unknown-linux-gnu") { - if Command::new("pkg-config").output().is_err() { - msg.push_str(&format!( - " + if host.contains("unknown-linux") + && target.contains("unknown-linux-gnu") + && Command::new("pkg-config").output().is_err() + { + msg.push_str( + " It looks like you're compiling on Linux and also targeting Linux. Currently this requires the `pkg-config` utility to find OpenSSL but unfortunately `pkg-config` could not be found. If you have OpenSSL installed you can likely fix this by installing `pkg-config`. -" - )); - } +", + ); } if host.contains("windows") && target.contains("windows-gnu") { - msg.push_str(&format!( + msg.push_str( " It looks like you're compiling for MinGW but you may not have either OpenSSL or pkg-config installed. You can install these two dependencies with: @@ -134,12 +136,12 @@ and try building this crate again. -" - )); +", + ); } if host.contains("windows") && target.contains("windows-msvc") { - msg.push_str(&format!( + msg.push_str( " It looks like you're compiling for MSVC but we couldn't detect an OpenSSL installation. 
If there isn't one installed then you can try the rust-openssl @@ -148,8 +150,8 @@ https://github.com/sfackler/rust-openssl#windows -" - )); +", + ); } panic!(msg); @@ -207,10 +209,7 @@ .find_package("openssl"); if let Err(e) = lib { - println!( - "note: vcpkg did not find openssl: {}", - e - ); + println!("note: vcpkg did not find openssl: {}", e); return; } @@ -237,5 +236,6 @@ } } } - return None; + + None } diff -Nru cargo-0.44.1/vendor/openssl-sys/build/main.rs cargo-0.47.0/vendor/openssl-sys/build/main.rs --- cargo-0.44.1/vendor/openssl-sys/build/main.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/build/main.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,5 @@ +#![allow(clippy::inconsistent_digit_grouping)] + extern crate autocfg; extern crate cc; #[cfg(feature = "vendored_debian_disabled")] @@ -79,11 +81,11 @@ ); println!("cargo:include={}", include_dir.to_string_lossy()); - let version = validate_headers(&[include_dir.clone().into()]); + let version = validate_headers(&[include_dir]); let libs_env = env("OPENSSL_LIBS"); let libs = match libs_env.as_ref().and_then(|s| s.to_str()) { - Some(ref v) => v.split(":").collect(), + Some(ref v) => v.split(':').collect(), None => match version { Version::Openssl10x if target.contains("windows") => vec!["ssleay32", "libeay32"], Version::Openssl11x if target.contains("windows-msvc") => vec!["libssl", "libcrypto"], @@ -218,6 +220,7 @@ (3, 0, _) => ('3', '0', 'x'), (3, 1, 0) => ('3', '1', '0'), (3, 1, _) => ('3', '1', 'x'), + (3, 2, 0) => ('3', '2', '0'), _ => version_error(), }; @@ -258,7 +261,7 @@ " This crate is only compatible with OpenSSL 1.0.1 through 1.1.1, or LibreSSL 2.5 -through 3.1.x, but a different version of OpenSSL was found. The build is now aborting +through 3.2.0, but a different version of OpenSSL was found. The build is now aborting due to this version mismatch. " diff -Nru cargo-0.44.1/vendor/openssl-sys/.cargo-checksum.json cargo-0.47.0/vendor/openssl-sys/.cargo-checksum.json --- cargo-0.44.1/vendor/openssl-sys/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"7410fef80af8ac071d4f63755c0ab89ac3df0fd1ea91f1d1f37cf5cec4395990"} \ No newline at end of file +{"files":{},"package":"a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/openssl-sys/Cargo.toml cargo-0.47.0/vendor/openssl-sys/Cargo.toml --- cargo-0.44.1/vendor/openssl-sys/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "openssl-sys" -version = "0.9.57" +version = "0.9.58" authors = ["Alex Crichton ", "Steven Fackler "] build = "build/main.rs" links = "openssl" diff -Nru cargo-0.44.1/vendor/openssl-sys/CHANGELOG.md cargo-0.47.0/vendor/openssl-sys/CHANGELOG.md --- cargo-0.44.1/vendor/openssl-sys/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -2,6 +2,18 @@ ## [Unreleased] +## [v0.9.58] - 2020-06-05 + +### Added + +* Added `SSL_set_mtu`. +* Added support for LibreSSL 3.2.0. +* Added `PEM_read_bio_EC_PUBKEY`, `PEM_write_bio_EC_PUBKEY`, `d2i_EC_PUBKEY`, and `i2d_EC_PUBKEY`. 
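The v0.9.58 entries above and below this point add raw FFI bindings only; in the high-level `openssl` crate the new `EC_PUBKEY` symbols surface through `EcKey`'s public-key helpers. A sketch of that round trip; the pairing of these safe methods with the listed C symbols is my reading of rust-openssl, not something the changelog states:

```rust
use openssl::ec::{EcGroup, EcKey};
use openssl::nid::Nid;

fn main() -> Result<(), openssl::error::ErrorStack> {
    // Generate a P-256 key and round-trip just its public half through PEM/DER.
    let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?;
    let key = EcKey::generate(&group)?;

    let pem = key.public_key_to_pem()?;             // ~ PEM_write_bio_EC_PUBKEY
    let der = key.public_key_to_der()?;             // ~ i2d_EC_PUBKEY
    let parsed = EcKey::public_key_from_pem(&pem)?; // ~ PEM_read_bio_EC_PUBKEY

    assert_eq!(parsed.public_key_to_der()?, der);
    println!("EC public key is {} bytes as DER", der.len());
    Ok(())
}
```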
+* Added `EVP_PKEY_encrypt_init`, `EVP_PKEY_encrypt`, `EVP_PKEY_decrypt_init`, `EVP_PKEY_decrypt`, + `EVP_PKEY_get_raw_public_key`, `EVP_PKEY_new_raw_public_key`, `EVP_PKEY_get_raw_private_key`, + and `EVP_PKEY_new_raw_private_key`. +* Added `OBJ_sn2nid`. + ## [v0.9.57] - 2020-05-24 ### Added diff -Nru cargo-0.44.1/vendor/openssl-sys/.pc/disable-vendor.patch/build/main.rs cargo-0.47.0/vendor/openssl-sys/.pc/disable-vendor.patch/build/main.rs --- cargo-0.44.1/vendor/openssl-sys/.pc/disable-vendor.patch/build/main.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/.pc/disable-vendor.patch/build/main.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,5 @@ +#![allow(clippy::inconsistent_digit_grouping)] + extern crate autocfg; extern crate cc; #[cfg(feature = "vendored")] @@ -79,11 +81,11 @@ ); println!("cargo:include={}", include_dir.to_string_lossy()); - let version = validate_headers(&[include_dir.clone().into()]); + let version = validate_headers(&[include_dir]); let libs_env = env("OPENSSL_LIBS"); let libs = match libs_env.as_ref().and_then(|s| s.to_str()) { - Some(ref v) => v.split(":").collect(), + Some(ref v) => v.split(':').collect(), None => match version { Version::Openssl10x if target.contains("windows") => vec!["ssleay32", "libeay32"], Version::Openssl11x if target.contains("windows-msvc") => vec!["libssl", "libcrypto"], @@ -218,6 +220,7 @@ (3, 0, _) => ('3', '0', 'x'), (3, 1, 0) => ('3', '1', '0'), (3, 1, _) => ('3', '1', 'x'), + (3, 2, 0) => ('3', '2', '0'), _ => version_error(), }; @@ -258,7 +261,7 @@ " This crate is only compatible with OpenSSL 1.0.1 through 1.1.1, or LibreSSL 2.5 -through 3.1.x, but a different version of OpenSSL was found. The build is now aborting +through 3.2.0, but a different version of OpenSSL was found. The build is now aborting due to this version mismatch. " diff -Nru cargo-0.44.1/vendor/openssl-sys/.pc/disable-vendor.patch/Cargo.toml cargo-0.47.0/vendor/openssl-sys/.pc/disable-vendor.patch/Cargo.toml --- cargo-0.44.1/vendor/openssl-sys/.pc/disable-vendor.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/.pc/disable-vendor.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "openssl-sys" -version = "0.9.57" +version = "0.9.58" authors = ["Alex Crichton ", "Steven Fackler "] build = "build/main.rs" links = "openssl" diff -Nru cargo-0.44.1/vendor/openssl-sys/src/err.rs cargo-0.47.0/vendor/openssl-sys/src/err.rs --- cargo-0.44.1/vendor/openssl-sys/src/err.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/src/err.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,7 +9,7 @@ pub const fn ERR_PACK(l: c_int, f: c_int, r: c_int) -> c_ulong { ((l as c_ulong & 0x0FF) << 24) | ((f as c_ulong & 0xFFF) << 12) | - ((r as c_ulong & 0xFFF)) + (r as c_ulong & 0xFFF) } pub const fn ERR_GET_LIB(l: c_ulong) -> c_int { diff -Nru cargo-0.44.1/vendor/openssl-sys/src/evp.rs cargo-0.47.0/vendor/openssl-sys/src/evp.rs --- cargo-0.44.1/vendor/openssl-sys/src/evp.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/src/evp.rs 2020-10-01 21:38:28.000000000 +0000 @@ -167,7 +167,7 @@ pub fn EVP_DecryptFinal_ex( ctx: *mut EVP_CIPHER_CTX, outm: *mut c_uchar, - outl: *mut c_int + outl: *mut c_int, ) -> c_int; } cfg_if! 
{ @@ -433,6 +433,23 @@ pub fn EVP_PKEY_keygen_init(ctx: *mut EVP_PKEY_CTX) -> c_int; pub fn EVP_PKEY_keygen(ctx: *mut EVP_PKEY_CTX, key: *mut *mut EVP_PKEY) -> c_int; + + pub fn EVP_PKEY_encrypt_init(ctx: *mut EVP_PKEY_CTX) -> c_int; + pub fn EVP_PKEY_encrypt( + ctx: *mut EVP_PKEY_CTX, + pout: *mut c_uchar, + poutlen: *mut size_t, + pin: *const c_uchar, + pinlen: size_t, + ) -> c_int; + pub fn EVP_PKEY_decrypt_init(ctx: *mut EVP_PKEY_CTX) -> c_int; + pub fn EVP_PKEY_decrypt( + ctx: *mut EVP_PKEY_CTX, + pout: *mut c_uchar, + poutlen: *mut size_t, + pin: *const c_uchar, + pinlen: size_t, + ) -> c_int; } cfg_if! { @@ -446,6 +463,35 @@ } } } + +cfg_if! { + if #[cfg(any(ossl111))] { + extern "C" { + pub fn EVP_PKEY_get_raw_public_key( + pkey: *const EVP_PKEY, + ppub: *mut c_uchar, + len: *mut size_t, + ) -> c_int; + pub fn EVP_PKEY_new_raw_public_key( + ttype: c_int, + e: *mut ENGINE, + key: *const c_uchar, + keylen: size_t, + ) -> *mut EVP_PKEY; + pub fn EVP_PKEY_get_raw_private_key( + pkey: *const EVP_PKEY, + ppriv: *mut c_uchar, + len: *mut size_t, + ) -> c_int; + pub fn EVP_PKEY_new_raw_private_key( + ttype: c_int, + e: *mut ENGINE, + key: *const c_uchar, + keylen: size_t, + ) -> *mut EVP_PKEY; + } + } +} extern "C" { pub fn EVP_EncodeBlock(dst: *mut c_uchar, src: *const c_uchar, src_len: c_int) -> c_int; diff -Nru cargo-0.44.1/vendor/openssl-sys/src/lib.rs cargo-0.47.0/vendor/openssl-sys/src/lib.rs --- cargo-0.44.1/vendor/openssl-sys/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,5 +1,13 @@ -#![allow(non_camel_case_types, non_upper_case_globals, non_snake_case)] -#![allow(dead_code, overflowing_literals, unused_imports)] +#![allow( + clippy::missing_safety_doc, + clippy::unreadable_literal, + dead_code, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + overflowing_literals, + unused_imports +)] #![doc(html_root_url = "https://docs.rs/openssl-sys/0.9")] extern crate libc; diff -Nru cargo-0.44.1/vendor/openssl-sys/src/object.rs cargo-0.47.0/vendor/openssl-sys/src/object.rs --- cargo-0.44.1/vendor/openssl-sys/src/object.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/src/object.rs 2020-10-01 21:38:28.000000000 +0000 @@ -15,4 +15,5 @@ pub fn OBJ_find_sigid_algs(signid: c_int, pdig_nid: *mut c_int, ppkey_nid: *mut c_int) -> c_int; + pub fn OBJ_sn2nid(sn: *const libc::c_char) -> libc::c_int; } diff -Nru cargo-0.44.1/vendor/openssl-sys/src/pem.rs cargo-0.47.0/vendor/openssl-sys/src/pem.rs --- cargo-0.44.1/vendor/openssl-sys/src/pem.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/src/pem.rs 2020-10-01 21:38:28.000000000 +0000 @@ -92,6 +92,13 @@ callback: pem_password_cb, user_data: *mut c_void, ) -> c_int; + pub fn PEM_read_bio_EC_PUBKEY( + bp: *mut BIO, + ec: *mut *mut EC_KEY, + callback: pem_password_cb, + user_data: *mut c_void, + ) -> *mut EC_KEY; + pub fn PEM_write_bio_EC_PUBKEY(bp: *mut BIO, ec: *mut EC_KEY) -> c_int; pub fn PEM_read_bio_DHparams( bio: *mut BIO, out: *mut *mut DH, @@ -142,9 +149,7 @@ buf: *mut *const u8, length: c_long, ) -> *mut PKCS8_PRIV_KEY_INFO; - pub fn PKCS8_PRIV_KEY_INFO_free( - p8inf: *mut PKCS8_PRIV_KEY_INFO, - ); + pub fn PKCS8_PRIV_KEY_INFO_free(p8inf: *mut PKCS8_PRIV_KEY_INFO); pub fn PEM_read_bio_PKCS7( bio: *mut BIO, diff -Nru cargo-0.44.1/vendor/openssl-sys/src/ssl.rs cargo-0.47.0/vendor/openssl-sys/src/ssl.rs --- cargo-0.44.1/vendor/openssl-sys/src/ssl.rs 2020-05-27 21:15:58.000000000 +0000 +++ 
cargo-0.47.0/vendor/openssl-sys/src/ssl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -450,6 +450,10 @@ } } +pub unsafe fn SSL_set_mtu(ssl: *mut SSL, mtu: c_long) -> c_long { + SSL_ctrl(ssl, SSL_CTRL_SET_MTU, mtu, ptr::null_mut()) +} + pub type GEN_SESSION_CB = Option c_int>; @@ -711,6 +715,7 @@ #[cfg(any(libressl, all(ossl101, not(ossl110))))] pub const SSL_CTRL_GET_SESSION_REUSED: c_int = 8; pub const SSL_CTRL_EXTRA_CHAIN_CERT: c_int = 14; +pub const SSL_CTRL_SET_MTU: c_int = 17; #[cfg(any(libressl, all(ossl101, not(ossl110))))] pub const SSL_CTRL_OPTIONS: c_int = 32; pub const SSL_CTRL_MODE: c_int = 33; diff -Nru cargo-0.44.1/vendor/openssl-sys/src/x509.rs cargo-0.47.0/vendor/openssl-sys/src/x509.rs --- cargo-0.44.1/vendor/openssl-sys/src/x509.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/openssl-sys/src/x509.rs 2020-10-01 21:38:28.000000000 +0000 @@ -132,6 +132,12 @@ pub fn i2d_RSA_PUBKEY(k: *mut RSA, buf: *mut *mut u8) -> c_int; pub fn d2i_DSA_PUBKEY(k: *mut *mut DSA, pp: *mut *const c_uchar, length: c_long) -> *mut DSA; pub fn i2d_DSA_PUBKEY(a: *mut DSA, pp: *mut *mut c_uchar) -> c_int; + pub fn d2i_EC_PUBKEY( + a: *mut *mut EC_KEY, + pp: *mut *const c_uchar, + length: c_long, + ) -> *mut EC_KEY; + pub fn i2d_EC_PUBKEY(a: *mut EC_KEY, pp: *mut *mut c_uchar) -> c_int; pub fn i2d_PrivateKey(k: *mut EVP_PKEY, buf: *mut *mut u8) -> c_int; pub fn d2i_ECPrivateKey( diff -Nru cargo-0.44.1/vendor/pkg-config/.cargo-checksum.json cargo-0.47.0/vendor/pkg-config/.cargo-checksum.json --- cargo-0.44.1/vendor/pkg-config/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677"} \ No newline at end of file +{"files":{},"package":"d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/pkg-config/Cargo.toml cargo-0.47.0/vendor/pkg-config/Cargo.toml --- cargo-0.44.1/vendor/pkg-config/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "pkg-config" -version = "0.3.17" +version = "0.3.18" authors = ["Alex Crichton "] description = "A library to run the pkg-config system tool at build time in order to be used in\nCargo build scripts.\n" documentation = "https://docs.rs/pkg-config" diff -Nru cargo-0.44.1/vendor/pkg-config/CHANGELOG.md cargo-0.47.0/vendor/pkg-config/CHANGELOG.md --- cargo-0.44.1/vendor/pkg-config/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -5,6 +5,21 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [0.3.18] - 2020-07-11 + +### Fixed + +- Use `env::var_os()` almost everywhere to handle non-UTF8 paths in + environment variables, and also improve error handling around environment + variable handling (#106). + +### Changed + +- Default the `env_metadata` build parameter to `true` instead of `false`. + Whenever a pkg-config related environment variable changes it would make + sense to rebuild crates that use pkg-config, or otherwise changes might not + be picked up. As such the previous default didn't make much sense (#105). 
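Since the 0.3.18 entry above makes `env_metadata` default to `true`, a plain probe from a build script now also prints `cargo:rerun-if-env-changed=...` for the `PKG_CONFIG*` variables it consults. A minimal build-script sketch, assuming a hypothetical crate that links OpenSSL via pkg-config (the package name and version bound are placeholders):

```rust
// build.rs (illustrative sketch only)
fn main() {
    match pkg_config::Config::new()
        .atleast_version("1.1.0")
        .probe("openssl")
    {
        Ok(lib) => {
            // By this point pkg-config 0.3.18 has already emitted the
            // cargo:rustc-link-* directives and the rerun-if-env-changed lines.
            for path in &lib.include_paths {
                println!("cargo:warning=OpenSSL headers found in {}", path.display());
            }
        }
        Err(err) => println!("cargo:warning=pkg-config probe failed: {}", err),
    }
}
```

With the old default, getting a rebuild after changing `PKG_CONFIG_PATH` required an explicit `.env_metadata(true)` call.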
+ ## [0.3.17] - 2019-11-02 ### Fixed diff -Nru cargo-0.44.1/vendor/pkg-config/debian/patches/debian-auto-cross-compile.patch cargo-0.47.0/vendor/pkg-config/debian/patches/debian-auto-cross-compile.patch --- cargo-0.44.1/vendor/pkg-config/debian/patches/debian-auto-cross-compile.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/debian/patches/debian-auto-cross-compile.patch 2020-10-01 21:38:28.000000000 +0000 @@ -1,17 +1,15 @@ --- a/src/lib.rs +++ b/src/lib.rs -@@ -385,9 +385,11 @@ - } - +@@ -399,7 +389,11 @@ impl Config { fn command(&self, name: &str, args: &[&str]) -> Command { -- let exe = self -- .env_var("PKG_CONFIG") -- .unwrap_or_else(|_| String::from("pkg-config")); -+ let exe = self.env_var("PKG_CONFIG").unwrap_or_else(|_| { -+ self.env_var("DEB_HOST_GNU_TYPE") -+ .map(|t| t.to_string() + "-pkg-config") -+ .unwrap_or_else(|_| String::from("pkg-config")) -+ }); + let exe = self + .env_var_os("PKG_CONFIG") +- .unwrap_or_else(|| OsString::from("pkg-config")); ++ .unwrap_or_else(|| { ++ self.env_var_os("DEB_HOST_GNU_TYPE") ++ .map(|mut t| { t.push(OsString::from("-pkg-config")); t }) ++ .unwrap_or_else(|| OsString::from("pkg-config")) ++ }); let mut cmd = Command::new(exe); if self.is_static(name) { cmd.arg("--static"); diff -Nru cargo-0.44.1/vendor/pkg-config/debian/patches/no-special-snowflake-env.patch cargo-0.47.0/vendor/pkg-config/debian/patches/no-special-snowflake-env.patch --- cargo-0.44.1/vendor/pkg-config/debian/patches/no-special-snowflake-env.patch 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/debian/patches/no-special-snowflake-env.patch 2020-10-01 21:38:28.000000000 +0000 @@ -1,21 +1,3 @@ ---- a/tests/test.rs -+++ b/tests/test.rs -@@ -34,7 +34,6 @@ - pkg_config::probe_library(name) - } - --#[test] - fn cross_disabled() { - let _g = LOCK.lock(); - reset(); -@@ -46,7 +45,6 @@ - } - } - --#[test] - fn cross_enabled() { - let _g = LOCK.lock(); - reset(); --- a/src/lib.rs +++ b/src/lib.rs @@ -9,8 +9,6 @@ @@ -27,22 +9,63 @@ //! * `FOO_NO_PKG_CONFIG` - if set, this will disable running `pkg-config` when //! probing for the library named `foo`. //! -@@ -344,17 +342,7 @@ +@@ -106,9 +104,8 @@ pub enum Error { + /// Contains the name of the responsible environment variable. + EnvNoPkgConfig(String), + +- /// Cross compilation detected. +- /// +- /// Override with `PKG_CONFIG_ALLOW_CROSS=1`. ++ /// Cross compilation detected. Kept for compatibility; ++ /// the Debian package never emits this. + CrossCompilation, + + /// Failed to run `pkg-config`. +@@ -152,11 +145,6 @@ impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::EnvNoPkgConfig(ref name) => write!(f, "Aborted because {} is set", name), +- Error::CrossCompilation => write!( +- f, +- "Cross compilation detected. \ +- Use PKG_CONFIG_ALLOW_CROSS=1 to override" +- ), + Error::Command { + ref command, + ref cause, +@@ -180,7 +168,7 @@ impl fmt::Display for Error { + } + Ok(()) + } +- Error::__Nonexhaustive => panic!(), ++ Error::CrossCompilation | Error::__Nonexhaustive => panic!(), + } + } + } +@@ -341,6 +329,8 @@ impl Config { + if host == target { + return true; + } ++ // always enable PKG_CONFIG_ALLOW_CROSS override in Debian ++ return true; // pkg-config may not be aware of cross-compilation, and require // a wrapper script that sets up platform-specific prefixes. 
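The refreshed `no-special-snowflake-env.patch` above short-circuits upstream's cross-compilation gate with an unconditional `return true`. For comparison, the upstream decision it bypasses looks roughly like the sketch below (simplified to plain `env::var_os` lookups instead of the crate's target-scoped `targetted_env_var`):

```rust
use std::env;

// Rough restatement of the upstream target_supported() gate, not the vendored code.
fn pkg_config_usable_for_target() -> bool {
    let host = env::var_os("HOST").unwrap_or_default();
    let target = env::var_os("TARGET").unwrap_or_default();
    if host == target {
        return true; // native build: always allowed
    }
    match env::var_os("PKG_CONFIG_ALLOW_CROSS") {
        Some(ref v) if v == "0" => false, // explicitly disabled
        Some(_) => true,                  // explicitly enabled
        // Otherwise assume a customised pkg-config knows about the target.
        None => {
            env::var_os("PKG_CONFIG").is_some()
                || env::var_os("PKG_CONFIG_SYSROOT_DIR").is_some()
        }
    }
}

fn main() {
    println!("pkg-config usable: {}", pkg_config_usable_for_target());
}
```

Debian can afford the unconditional `true` because `debian-auto-cross-compile.patch` (above) already routes cross builds to a `DEB_HOST_GNU_TYPE`-prefixed `pkg-config` wrapper.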
-- match self.targetted_env_var("PKG_CONFIG_ALLOW_CROSS") { -- // don't use pkg-config if explicitly disabled -- Ok(ref val) if val == "0" => false, -- Ok(_) => true, -- Err(_) => { -- // if not disabled, and pkg-config is customized, -- // then assume it's prepared for cross-compilation -- self.targetted_env_var("PKG_CONFIG").is_ok() -- || self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR").is_ok() -- } -- } -+ true +--- a/tests/test.rs ++++ b/tests/test.rs +@@ -34,7 +34,6 @@ fn find(name: &str) -> Result &str { - match *self { - Error::EnvNoPkgConfig(_) => "pkg-config requested to be aborted", - Error::CrossCompilation => { - "pkg-config doesn't handle cross compilation. \ - Use PKG_CONFIG_ALLOW_CROSS=1 to override" - } - Error::Command { .. } => "failed to run pkg-config", - Error::Failure { .. } => "pkg-config did not exit sucessfully", - Error::__Nonexhaustive => panic!(), - } - } - - fn cause(&self) -> Option<&dyn error::Error> { - match *self { - Error::Command { ref cause, .. } => Some(cause), - _ => None, - } - } -} +impl error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { Error::EnvNoPkgConfig(ref name) => write!(f, "Aborted because {} is set", name), - Error::CrossCompilation => write!( - f, - "Cross compilation detected. \ - Use PKG_CONFIG_ALLOW_CROSS=1 to override" - ), Error::Command { ref command, ref cause, @@ -178,7 +152,7 @@ } Ok(()) } - Error::__Nonexhaustive => panic!(), + Error::CrossCompilation | Error::__Nonexhaustive => panic!(), } } } @@ -215,7 +189,7 @@ print_system_cflags: true, print_system_libs: true, cargo_metadata: true, - env_metadata: false, + env_metadata: true, } } @@ -277,7 +251,7 @@ /// Define whether metadata should be emitted for cargo allowing to /// automatically rebuild when environment variables change. Defaults to - /// `false`. + /// `true`. pub fn env_metadata(&mut self, env_metadata: bool) -> &mut Config { self.env_metadata = env_metadata; self @@ -331,18 +305,30 @@ } pub fn target_supported(&self) -> bool { - let target = env::var("TARGET").unwrap_or_default(); - let host = env::var("HOST").unwrap_or_default(); + let target = env::var_os("TARGET").unwrap_or_default(); + let host = env::var_os("HOST").unwrap_or_default(); // Only use pkg-config in host == target situations by default (allowing an // override). if host == target { return true; } + // always enable PKG_CONFIG_ALLOW_CROSS override in Debian + return true; // pkg-config may not be aware of cross-compilation, and require // a wrapper script that sets up platform-specific prefixes. 
- true + match self.targetted_env_var("PKG_CONFIG_ALLOW_CROSS") { + // don't use pkg-config if explicitly disabled + Some(ref val) if val == "0" => false, + Some(_) => true, + None => { + // if not disabled, and pkg-config is customized, + // then assume it's prepared for cross-compilation + self.targetted_env_var("PKG_CONFIG").is_some() + || self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR").is_some() + } + } } /// Deprecated in favor of the top level `get_variable` function @@ -351,26 +337,27 @@ get_variable(package, variable).map_err(|e| e.to_string()) } - fn targetted_env_var(&self, var_base: &str) -> Result { - if let Ok(target) = env::var("TARGET") { - let host = env::var("HOST")?; - let kind = if host == target { "HOST" } else { "TARGET" }; - let target_u = target.replace("-", "_"); - - self.env_var(&format!("{}_{}", var_base, target)) - .or_else(|_| self.env_var(&format!("{}_{}", var_base, target_u))) - .or_else(|_| self.env_var(&format!("{}_{}", kind, var_base))) - .or_else(|_| self.env_var(var_base)) - } else { - self.env_var(var_base) - } - } - - fn env_var(&self, name: &str) -> Result { - if self.env_metadata { - println!("cargo:rerun-if-env-changed={}", name); + fn targetted_env_var(&self, var_base: &str) -> Option { + match (env::var("TARGET"), env::var("HOST")) { + (Ok(target), Ok(host)) => { + let kind = if host == target { "HOST" } else { "TARGET" }; + let target_u = target.replace("-", "_"); + + self.env_var_os(&format!("{}_{}", var_base, target)) + .or_else(|| self.env_var_os(&format!("{}_{}", var_base, target_u))) + .or_else(|| self.env_var_os(&format!("{}_{}", kind, var_base))) + .or_else(|| self.env_var_os(var_base)) + } + (Err(env::VarError::NotPresent), _) | (_, Err(env::VarError::NotPresent)) => { + self.env_var_os(var_base) + } + (Err(env::VarError::NotUnicode(s)), _) | (_, Err(env::VarError::NotUnicode(s))) => { + panic!( + "HOST or TARGET environment variable is not valid unicode: {:?}", + s + ) + } } - env::var(name) } fn env_var_os(&self, name: &str) -> Option { @@ -386,21 +373,21 @@ fn command(&self, name: &str, args: &[&str]) -> Command { let exe = self - .env_var("PKG_CONFIG") - .unwrap_or_else(|_| String::from("pkg-config")); + .env_var_os("PKG_CONFIG") + .unwrap_or_else(|| OsString::from("pkg-config")); let mut cmd = Command::new(exe); if self.is_static(name) { cmd.arg("--static"); } cmd.args(args).args(&self.extra_args); - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_PATH") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_PATH") { cmd.env("PKG_CONFIG_PATH", value); } - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_LIBDIR") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_LIBDIR") { cmd.env("PKG_CONFIG_LIBDIR", value); } - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR") { cmd.env("PKG_CONFIG_SYSROOT_DIR", value); } if self.print_system_libs { diff -Nru cargo-0.44.1/vendor/pkg-config/.pc/no-special-snowflake-env.patch/src/lib.rs cargo-0.47.0/vendor/pkg-config/.pc/no-special-snowflake-env.patch/src/lib.rs --- cargo-0.44.1/vendor/pkg-config/.pc/no-special-snowflake-env.patch/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/.pc/no-special-snowflake-env.patch/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -126,27 +126,7 @@ __Nonexhaustive, } -impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::EnvNoPkgConfig(_) => "pkg-config requested to be aborted", - 
Error::CrossCompilation => { - "pkg-config doesn't handle cross compilation. \ - Use PKG_CONFIG_ALLOW_CROSS=1 to override" - } - Error::Command { .. } => "failed to run pkg-config", - Error::Failure { .. } => "pkg-config did not exit sucessfully", - Error::__Nonexhaustive => panic!(), - } - } - - fn cause(&self) -> Option<&dyn error::Error> { - match *self { - Error::Command { ref cause, .. } => Some(cause), - _ => None, - } - } -} +impl error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { @@ -217,7 +197,7 @@ print_system_cflags: true, print_system_libs: true, cargo_metadata: true, - env_metadata: false, + env_metadata: true, } } @@ -279,7 +259,7 @@ /// Define whether metadata should be emitted for cargo allowing to /// automatically rebuild when environment variables change. Defaults to - /// `false`. + /// `true`. pub fn env_metadata(&mut self, env_metadata: bool) -> &mut Config { self.env_metadata = env_metadata; self @@ -333,8 +313,8 @@ } pub fn target_supported(&self) -> bool { - let target = env::var("TARGET").unwrap_or_default(); - let host = env::var("HOST").unwrap_or_default(); + let target = env::var_os("TARGET").unwrap_or_default(); + let host = env::var_os("HOST").unwrap_or_default(); // Only use pkg-config in host == target situations by default (allowing an // override). @@ -346,13 +326,13 @@ // a wrapper script that sets up platform-specific prefixes. match self.targetted_env_var("PKG_CONFIG_ALLOW_CROSS") { // don't use pkg-config if explicitly disabled - Ok(ref val) if val == "0" => false, - Ok(_) => true, - Err(_) => { + Some(ref val) if val == "0" => false, + Some(_) => true, + None => { // if not disabled, and pkg-config is customized, // then assume it's prepared for cross-compilation - self.targetted_env_var("PKG_CONFIG").is_ok() - || self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR").is_ok() + self.targetted_env_var("PKG_CONFIG").is_some() + || self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR").is_some() } } } @@ -363,26 +343,27 @@ get_variable(package, variable).map_err(|e| e.to_string()) } - fn targetted_env_var(&self, var_base: &str) -> Result { - if let Ok(target) = env::var("TARGET") { - let host = env::var("HOST")?; - let kind = if host == target { "HOST" } else { "TARGET" }; - let target_u = target.replace("-", "_"); - - self.env_var(&format!("{}_{}", var_base, target)) - .or_else(|_| self.env_var(&format!("{}_{}", var_base, target_u))) - .or_else(|_| self.env_var(&format!("{}_{}", kind, var_base))) - .or_else(|_| self.env_var(var_base)) - } else { - self.env_var(var_base) - } - } - - fn env_var(&self, name: &str) -> Result { - if self.env_metadata { - println!("cargo:rerun-if-env-changed={}", name); + fn targetted_env_var(&self, var_base: &str) -> Option { + match (env::var("TARGET"), env::var("HOST")) { + (Ok(target), Ok(host)) => { + let kind = if host == target { "HOST" } else { "TARGET" }; + let target_u = target.replace("-", "_"); + + self.env_var_os(&format!("{}_{}", var_base, target)) + .or_else(|| self.env_var_os(&format!("{}_{}", var_base, target_u))) + .or_else(|| self.env_var_os(&format!("{}_{}", kind, var_base))) + .or_else(|| self.env_var_os(var_base)) + } + (Err(env::VarError::NotPresent), _) | (_, Err(env::VarError::NotPresent)) => { + self.env_var_os(var_base) + } + (Err(env::VarError::NotUnicode(s)), _) | (_, Err(env::VarError::NotUnicode(s))) => { + panic!( + "HOST or TARGET environment variable is not valid unicode: {:?}", + s + ) + } } - env::var(name) } fn env_var_os(&self, 
name: &str) -> Option { @@ -398,21 +379,21 @@ fn command(&self, name: &str, args: &[&str]) -> Command { let exe = self - .env_var("PKG_CONFIG") - .unwrap_or_else(|_| String::from("pkg-config")); + .env_var_os("PKG_CONFIG") + .unwrap_or_else(|| OsString::from("pkg-config")); let mut cmd = Command::new(exe); if self.is_static(name) { cmd.arg("--static"); } cmd.args(args).args(&self.extra_args); - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_PATH") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_PATH") { cmd.env("PKG_CONFIG_PATH", value); } - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_LIBDIR") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_LIBDIR") { cmd.env("PKG_CONFIG_LIBDIR", value); } - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR") { cmd.env("PKG_CONFIG_SYSROOT_DIR", value); } if self.print_system_libs { diff -Nru cargo-0.44.1/vendor/pkg-config/src/lib.rs cargo-0.47.0/vendor/pkg-config/src/lib.rs --- cargo-0.44.1/vendor/pkg-config/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/pkg-config/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -104,9 +104,8 @@ /// Contains the name of the responsible environment variable. EnvNoPkgConfig(String), - /// Cross compilation detected. - /// - /// Override with `PKG_CONFIG_ALLOW_CROSS=1`. + /// Cross compilation detected. Kept for compatibility; + /// the Debian package never emits this. CrossCompilation, /// Failed to run `pkg-config`. @@ -124,37 +123,12 @@ __Nonexhaustive, } -impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::EnvNoPkgConfig(_) => "pkg-config requested to be aborted", - Error::CrossCompilation => { - "pkg-config doesn't handle cross compilation. \ - Use PKG_CONFIG_ALLOW_CROSS=1 to override" - } - Error::Command { .. } => "failed to run pkg-config", - Error::Failure { .. } => "pkg-config did not exit sucessfully", - Error::__Nonexhaustive => panic!(), - } - } - - fn cause(&self) -> Option<&dyn error::Error> { - match *self { - Error::Command { ref cause, .. } => Some(cause), - _ => None, - } - } -} +impl error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { Error::EnvNoPkgConfig(ref name) => write!(f, "Aborted because {} is set", name), - Error::CrossCompilation => write!( - f, - "Cross compilation detected. \ - Use PKG_CONFIG_ALLOW_CROSS=1 to override" - ), Error::Command { ref command, ref cause, @@ -178,7 +152,7 @@ } Ok(()) } - Error::__Nonexhaustive => panic!(), + Error::CrossCompilation | Error::__Nonexhaustive => panic!(), } } } @@ -215,7 +189,7 @@ print_system_cflags: true, print_system_libs: true, cargo_metadata: true, - env_metadata: false, + env_metadata: true, } } @@ -277,7 +251,7 @@ /// Define whether metadata should be emitted for cargo allowing to /// automatically rebuild when environment variables change. Defaults to - /// `false`. + /// `true`. pub fn env_metadata(&mut self, env_metadata: bool) -> &mut Config { self.env_metadata = env_metadata; self @@ -331,18 +305,30 @@ } pub fn target_supported(&self) -> bool { - let target = env::var("TARGET").unwrap_or_default(); - let host = env::var("HOST").unwrap_or_default(); + let target = env::var_os("TARGET").unwrap_or_default(); + let host = env::var_os("HOST").unwrap_or_default(); // Only use pkg-config in host == target situations by default (allowing an // override). 
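Both copies of the rewritten `targetted_env_var` around this point resolve a variable from its most target-specific spelling down to the bare name. A standalone sketch of that order, leaving out the `cargo:rerun-if-env-changed` bookkeeping done by `env_var_os` and the panic on non-unicode `HOST`/`TARGET`:

```rust
use std::env;
use std::ffi::OsString;

fn targeted_env_var(var_base: &str) -> Option<OsString> {
    let (target, host) = match (env::var("TARGET"), env::var("HOST")) {
        (Ok(t), Ok(h)) => (t, h),
        // Outside a build script TARGET/HOST are unset: fall back to the bare name.
        _ => return env::var_os(var_base),
    };
    let kind = if host == target { "HOST" } else { "TARGET" };
    let target_u = target.replace("-", "_");

    env::var_os(format!("{}_{}", var_base, target))                    // e.g. PKG_CONFIG_PATH_<triple>
        .or_else(|| env::var_os(format!("{}_{}", var_base, target_u))) // hyphens replaced by underscores
        .or_else(|| env::var_os(format!("{}_{}", kind, var_base)))     // HOST_... or TARGET_... prefix
        .or_else(|| env::var_os(var_base))                             // bare PKG_CONFIG_PATH
}

fn main() {
    println!("{:?}", targeted_env_var("PKG_CONFIG_PATH"));
}
```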
if host == target { return true; } + // always enable PKG_CONFIG_ALLOW_CROSS override in Debian + return true; // pkg-config may not be aware of cross-compilation, and require // a wrapper script that sets up platform-specific prefixes. - true + match self.targetted_env_var("PKG_CONFIG_ALLOW_CROSS") { + // don't use pkg-config if explicitly disabled + Some(ref val) if val == "0" => false, + Some(_) => true, + None => { + // if not disabled, and pkg-config is customized, + // then assume it's prepared for cross-compilation + self.targetted_env_var("PKG_CONFIG").is_some() + || self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR").is_some() + } + } } /// Deprecated in favor of the top level `get_variable` function @@ -351,26 +337,27 @@ get_variable(package, variable).map_err(|e| e.to_string()) } - fn targetted_env_var(&self, var_base: &str) -> Result { - if let Ok(target) = env::var("TARGET") { - let host = env::var("HOST")?; - let kind = if host == target { "HOST" } else { "TARGET" }; - let target_u = target.replace("-", "_"); - - self.env_var(&format!("{}_{}", var_base, target)) - .or_else(|_| self.env_var(&format!("{}_{}", var_base, target_u))) - .or_else(|_| self.env_var(&format!("{}_{}", kind, var_base))) - .or_else(|_| self.env_var(var_base)) - } else { - self.env_var(var_base) - } - } - - fn env_var(&self, name: &str) -> Result { - if self.env_metadata { - println!("cargo:rerun-if-env-changed={}", name); + fn targetted_env_var(&self, var_base: &str) -> Option { + match (env::var("TARGET"), env::var("HOST")) { + (Ok(target), Ok(host)) => { + let kind = if host == target { "HOST" } else { "TARGET" }; + let target_u = target.replace("-", "_"); + + self.env_var_os(&format!("{}_{}", var_base, target)) + .or_else(|| self.env_var_os(&format!("{}_{}", var_base, target_u))) + .or_else(|| self.env_var_os(&format!("{}_{}", kind, var_base))) + .or_else(|| self.env_var_os(var_base)) + } + (Err(env::VarError::NotPresent), _) | (_, Err(env::VarError::NotPresent)) => { + self.env_var_os(var_base) + } + (Err(env::VarError::NotUnicode(s)), _) | (_, Err(env::VarError::NotUnicode(s))) => { + panic!( + "HOST or TARGET environment variable is not valid unicode: {:?}", + s + ) + } } - env::var(name) } fn env_var_os(&self, name: &str) -> Option { @@ -385,24 +372,26 @@ } fn command(&self, name: &str, args: &[&str]) -> Command { - let exe = self.env_var("PKG_CONFIG").unwrap_or_else(|_| { - self.env_var("DEB_HOST_GNU_TYPE") - .map(|t| t.to_string() + "-pkg-config") - .unwrap_or_else(|_| String::from("pkg-config")) - }); + let exe = self + .env_var_os("PKG_CONFIG") + .unwrap_or_else(|| { + self.env_var_os("DEB_HOST_GNU_TYPE") + .map(|mut t| { t.push(OsString::from("-pkg-config")); t }) + .unwrap_or_else(|| OsString::from("pkg-config")) + }); let mut cmd = Command::new(exe); if self.is_static(name) { cmd.arg("--static"); } cmd.args(args).args(&self.extra_args); - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_PATH") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_PATH") { cmd.env("PKG_CONFIG_PATH", value); } - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_LIBDIR") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_LIBDIR") { cmd.env("PKG_CONFIG_LIBDIR", value); } - if let Ok(value) = self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR") { + if let Some(value) = self.targetted_env_var("PKG_CONFIG_SYSROOT_DIR") { cmd.env("PKG_CONFIG_SYSROOT_DIR", value); } if self.print_system_libs { diff -Nru cargo-0.44.1/vendor/ppv-lite86/.cargo-checksum.json 
cargo-0.47.0/vendor/ppv-lite86/.cargo-checksum.json --- cargo-0.44.1/vendor/ppv-lite86/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ppv-lite86/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"} \ No newline at end of file +{"files":{},"package":"c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/ppv-lite86/Cargo.toml cargo-0.47.0/vendor/ppv-lite86/Cargo.toml --- cargo-0.44.1/vendor/ppv-lite86/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ppv-lite86/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "ppv-lite86" -version = "0.2.8" +version = "0.2.9" authors = ["The CryptoCorrosion Contributors"] description = "Implementation of the crypto-simd API for x86" keywords = ["crypto", "simd", "x86"] diff -Nru cargo-0.44.1/vendor/ppv-lite86/src/generic.rs cargo-0.47.0/vendor/ppv-lite86/src/generic.rs --- cargo-0.44.1/vendor/ppv-lite86/src/generic.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ppv-lite86/src/generic.rs 2020-10-01 21:38:28.000000000 +0000 @@ -4,11 +4,11 @@ use crate::soft::{x2, x4}; use crate::types::*; +#[repr(C)] #[derive(Clone, Copy)] pub union vec128_storage { d: [u32; 4], q: [u64; 2], - o: [u128; 1], } impl From<[u32; 4]> for vec128_storage { #[inline] @@ -37,14 +37,14 @@ impl Default for vec128_storage { #[inline] fn default() -> Self { - Self { o: [0] } + Self { q: [0, 0] } } } impl Eq for vec128_storage {} impl PartialEq for vec128_storage { #[inline] fn eq(&self, rhs: &Self) -> bool { - unsafe { self.o == rhs.o } + unsafe { self.q == rhs.q } } } #[derive(Clone, Copy, PartialEq, Eq, Default)] @@ -151,14 +151,22 @@ unsafe { T::unpack(q) } } +fn o_of_q(q: [u64; 2]) -> u128 { + u128::from(q[0]) | (u128::from(q[1]) << 64) +} + +fn q_of_o(o: u128) -> [u64; 2] { + [o as u64, (o >> 64) as u64] +} + fn omap(a: T, f: F) -> T where T: Store + Into, F: Fn(u128) -> u128, { let a: vec128_storage = a.into(); - let ao = unsafe { a.o }; - let o = vec128_storage { o: [f(ao[0])] }; + let ao = o_of_q(unsafe { a.q }); + let o = vec128_storage { q: q_of_o(f(ao)) }; unsafe { T::unpack(o) } } @@ -169,10 +177,10 @@ { let a: vec128_storage = a.into(); let b: vec128_storage = b.into(); - let ao = unsafe { a.o }; - let bo = unsafe { b.o }; + let ao = o_of_q(unsafe { a.q }); + let bo = o_of_q(unsafe { b.q }); let o = vec128_storage { - o: [f(ao[0], bo[0])], + q: q_of_o(f(ao, bo)), }; unsafe { T::unpack(o) } } @@ -456,7 +464,7 @@ impl From for vec128_storage { #[inline(always)] fn from(o: u128x1_generic) -> Self { - Self { o: o.0 } + Self { q: q_of_o(o.0[0]) } } } @@ -475,7 +483,7 @@ impl Store for u128x1_generic { #[inline(always)] unsafe fn unpack(s: vec128_storage) -> Self { - Self(s.o) + Self([o_of_q(s.q); 1]) } } diff -Nru cargo-0.44.1/vendor/proc-macro2/build.rs cargo-0.47.0/vendor/proc-macro2/build.rs --- cargo-0.44.1/vendor/proc-macro2/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,7 +14,7 @@ // procmacro2_semver_exempt surface area is implemented by using the // nightly-only proc_macro API. // -// "hygiene" +// "hygiene" // Enable Span::mixed_site() and non-dummy behavior of Span::resolved_at // and Span::located_at. Enabled on Rust 1.45+. 
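The `ppv-lite86` hunks above drop the `[u128; 1]` field from `vec128_storage` and route everything through two `u64` limbs via the new `o_of_q`/`q_of_o` helpers. A quick standalone round-trip check of that packing (restated here, not the vendored code):

```rust
// Pack a u128 from its low and high u64 limbs and back, mirroring the
// o_of_q / q_of_o helpers added in the ppv-lite86 hunk above.
fn o_of_q(q: [u64; 2]) -> u128 {
    u128::from(q[0]) | (u128::from(q[1]) << 64)
}

fn q_of_o(o: u128) -> [u64; 2] {
    [o as u64, (o >> 64) as u64]
}

fn main() {
    let o = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210u128;
    let q = q_of_o(o);
    assert_eq!(q, [0xfedc_ba98_7654_3210, 0x0123_4567_89ab_cdef]);
    assert_eq!(o_of_q(q), o);
    println!("low limb {:#x}, high limb {:#x}", q[0], q[1]);
}
```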
// @@ -61,6 +61,18 @@ println!("cargo:rustc-cfg=span_locations"); } + if version.minor < 32 { + println!("cargo:rustc-cfg=no_libprocmacro_unwind_safe"); + } + + if version.minor < 39 { + println!("cargo:rustc-cfg=no_bind_by_move_pattern_guard"); + } + + if version.minor >= 44 { + println!("cargo:rustc-cfg=lexerror_display"); + } + if version.minor >= 45 { println!("cargo:rustc-cfg=hygiene"); } diff -Nru cargo-0.44.1/vendor/proc-macro2/.cargo-checksum.json cargo-0.47.0/vendor/proc-macro2/.cargo-checksum.json --- cargo-0.44.1/vendor/proc-macro2/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"1502d12e458c49a4c9cbff560d0fe0060c252bc29799ed94ca2ed4bb665a0101"} \ No newline at end of file +{"files":{},"package":"1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/proc-macro2/Cargo.toml cargo-0.47.0/vendor/proc-macro2/Cargo.toml --- cargo-0.44.1/vendor/proc-macro2/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "proc-macro2" -version = "1.0.17" +version = "1.0.24" authors = ["Alex Crichton ", "David Tolnay "] description = "A substitute implementation of the compiler's `proc_macro` API to decouple\ntoken-based libraries from the procedural macro use case.\n" documentation = "https://docs.rs/proc-macro2" diff -Nru cargo-0.44.1/vendor/proc-macro2/src/fallback.rs cargo-0.47.0/vendor/proc-macro2/src/fallback.rs --- cargo-0.44.1/vendor/proc-macro2/src/fallback.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/src/fallback.rs 2020-10-01 21:38:28.000000000 +0000 @@ -4,8 +4,8 @@ use std::cell::RefCell; #[cfg(span_locations)] use std::cmp; -use std::fmt; -use std::iter; +use std::fmt::{self, Debug, Display}; +use std::iter::FromIterator; use std::mem; use std::ops::RangeBounds; #[cfg(procmacro2_semver_exempt)] @@ -49,6 +49,49 @@ fn take_inner(&mut self) -> Vec { mem::replace(&mut self.inner, Vec::new()) } + + fn push_token(&mut self, token: TokenTree) { + // https://github.com/alexcrichton/proc-macro2/issues/235 + match token { + #[cfg(not(no_bind_by_move_pattern_guard))] + TokenTree::Literal(crate::Literal { + #[cfg(wrap_proc_macro)] + inner: crate::imp::Literal::Fallback(literal), + #[cfg(not(wrap_proc_macro))] + inner: literal, + .. + }) if literal.text.starts_with('-') => { + push_negative_literal(self, literal); + } + #[cfg(no_bind_by_move_pattern_guard)] + TokenTree::Literal(crate::Literal { + #[cfg(wrap_proc_macro)] + inner: crate::imp::Literal::Fallback(literal), + #[cfg(not(wrap_proc_macro))] + inner: literal, + .. + }) => { + if literal.text.starts_with('-') { + push_negative_literal(self, literal); + } else { + self.inner + .push(TokenTree::Literal(crate::Literal::_new_stable(literal))); + } + } + _ => self.inner.push(token), + } + + #[cold] + fn push_negative_literal(stream: &mut TokenStream, mut literal: Literal) { + literal.text.remove(0); + let mut punct = crate::Punct::new('-', Spacing::Alone); + punct.set_span(crate::Span::_new_stable(literal.span)); + stream.inner.push(TokenTree::Punct(punct)); + stream + .inner + .push(TokenTree::Literal(crate::Literal::_new_stable(literal))); + } + } } // Nonrecursive to prevent stack overflow. 
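The new `push_token` above (proc-macro2 issue #235) means the fallback `TokenStream` never stores a literal whose text starts with `-`; it is split into a `-` punct followed by the positive literal. This is observable through the public API when proc-macro2 is used as an ordinary dependency outside a procedural macro:

```rust
use proc_macro2::{Literal, TokenStream, TokenTree};

fn main() {
    // A "negative literal" pushed into a stream...
    let stream: TokenStream = TokenTree::Literal(Literal::i64_unsuffixed(-7)).into();

    let parts: Vec<String> = stream
        .into_iter()
        .map(|tt| match tt {
            TokenTree::Punct(p) => format!("Punct('{}')", p.as_char()),
            TokenTree::Literal(l) => format!("Literal({})", l),
            other => format!("{:?}", other),
        })
        .collect();

    // ...comes back as two trees, per the hunk above: ["Punct('-')", "Literal(7)"]
    println!("{:?}", parts);
}
```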
@@ -105,7 +148,13 @@ } } -impl fmt::Display for TokenStream { +impl Display for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("cannot parse string into token stream") + } +} + +impl Display for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut joint = false; for (i, tt) in self.inner.iter().enumerate() { @@ -114,36 +163,21 @@ } joint = false; match tt { - TokenTree::Group(tt) => { - let (start, end) = match tt.delimiter() { - Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{", "}"), - Delimiter::Bracket => ("[", "]"), - Delimiter::None => ("", ""), - }; - if tt.stream().into_iter().next().is_none() { - write!(f, "{} {}", start, end)? - } else { - write!(f, "{} {} {}", start, tt.stream(), end)? - } - } - TokenTree::Ident(tt) => write!(f, "{}", tt)?, + TokenTree::Group(tt) => Display::fmt(tt, f), + TokenTree::Ident(tt) => Display::fmt(tt, f), TokenTree::Punct(tt) => { - write!(f, "{}", tt.as_char())?; - match tt.spacing() { - Spacing::Alone => {} - Spacing::Joint => joint = true, - } + joint = tt.spacing() == Spacing::Joint; + Display::fmt(tt, f) } - TokenTree::Literal(tt) => write!(f, "{}", tt)?, - } + TokenTree::Literal(tt) => Display::fmt(tt, f), + }? } Ok(()) } } -impl fmt::Debug for TokenStream { +impl Debug for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("TokenStream ")?; f.debug_list().entries(self.clone()).finish() @@ -172,27 +206,25 @@ impl From for TokenStream { fn from(tree: TokenTree) -> TokenStream { - TokenStream { inner: vec![tree] } + let mut stream = TokenStream::new(); + stream.push_token(tree); + stream } } -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut v = Vec::new(); - - for token in streams.into_iter() { - v.push(token); - } - - TokenStream { inner: v } +impl FromIterator for TokenStream { + fn from_iter>(tokens: I) -> Self { + let mut stream = TokenStream::new(); + stream.extend(tokens); + stream } } -impl iter::FromIterator for TokenStream { +impl FromIterator for TokenStream { fn from_iter>(streams: I) -> Self { let mut v = Vec::new(); - for mut stream in streams.into_iter() { + for mut stream in streams { v.extend(stream.take_inner()); } @@ -201,15 +233,14 @@ } impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner.extend(streams); + fn extend>(&mut self, tokens: I) { + tokens.into_iter().for_each(|token| self.push_token(token)); } } impl Extend for TokenStream { fn extend>(&mut self, streams: I) { - self.inner - .extend(streams.into_iter().flat_map(|stream| stream)); + self.inner.extend(streams.into_iter().flatten()); } } @@ -241,7 +272,7 @@ } } -impl fmt::Debug for SourceFile { +impl Debug for SourceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SourceFile") .field("path", &self.path()) @@ -386,7 +417,6 @@ Span { lo: 0, hi: 0 } } - #[cfg(procmacro2_semver_exempt)] #[cfg(hygiene)] pub fn mixed_site() -> Span { Span::call_site() @@ -397,7 +427,6 @@ Span::call_site() } - #[cfg(procmacro2_semver_exempt)] pub fn resolved_at(&self, _other: Span) -> Span { // Stable spans consist only of line/column information, so // `resolved_at` and `located_at` only select which span the @@ -405,7 +434,6 @@ *self } - #[cfg(procmacro2_semver_exempt)] pub fn located_at(&self, other: Span) -> Span { other } @@ -486,18 +514,25 @@ } } -impl fmt::Debug for Span { +impl Debug for Span { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - #[cfg(procmacro2_semver_exempt)] + 
#[cfg(span_locations)] return write!(f, "bytes({}..{})", self.lo, self.hi); - #[cfg(not(procmacro2_semver_exempt))] + #[cfg(not(span_locations))] write!(f, "Span") } } pub(crate) fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { - if cfg!(procmacro2_semver_exempt) { + #[cfg(span_locations)] + { + if span.lo == 0 && span.hi == 0 { + return; + } + } + + if cfg!(span_locations) { debug.field("span", &span); } } @@ -543,30 +578,39 @@ } } -impl fmt::Display for Group { +impl Display for Group { + // We attempt to match libproc_macro's formatting. + // Empty parens: () + // Nonempty parens: (...) + // Empty brackets: [] + // Nonempty brackets: [...] + // Empty braces: { } + // Nonempty braces: { ... } fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (left, right) = match self.delimiter { + let (open, close) = match self.delimiter { Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{", "}"), + Delimiter::Brace => ("{ ", "}"), Delimiter::Bracket => ("[", "]"), Delimiter::None => ("", ""), }; - f.write_str(left)?; - self.stream.fmt(f)?; - f.write_str(right)?; + f.write_str(open)?; + Display::fmt(&self.stream, f)?; + if self.delimiter == Delimiter::Brace && !self.stream.inner.is_empty() { + f.write_str(" ")?; + } + f.write_str(close)?; Ok(()) } } -impl fmt::Debug for Group { +impl Debug for Group { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut debug = fmt.debug_struct("Group"); debug.field("delimiter", &self.delimiter); debug.field("stream", &self.stream); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); + debug_span_field_if_nontrivial(&mut debug, self.span); debug.finish() } } @@ -670,18 +714,18 @@ } } -impl fmt::Display for Ident { +impl Display for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.raw { - "r#".fmt(f)?; + f.write_str("r#")?; } - self.sym.fmt(f) + Display::fmt(&self.sym, f) } } -impl fmt::Debug for Ident { +impl Debug for Ident { // Ident(proc_macro), Ident(r#union) - #[cfg(not(procmacro2_semver_exempt))] + #[cfg(not(span_locations))] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut debug = f.debug_tuple("Ident"); debug.field(&format_args!("{}", self)); @@ -692,11 +736,11 @@ // sym: proc_macro, // span: bytes(128..138) // } - #[cfg(procmacro2_semver_exempt)] + #[cfg(span_locations)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut debug = f.debug_struct("Ident"); debug.field("sym", &format_args!("{}", self)); - debug.field("span", &self.span); + debug_span_field_if_nontrivial(&mut debug, self.span); debug.finish() } } @@ -766,7 +810,7 @@ pub fn f32_unsuffixed(f: f32) -> Literal { let mut s = f.to_string(); - if !s.contains(".") { + if !s.contains('.') { s.push_str(".0"); } Literal::_new(s) @@ -774,7 +818,7 @@ pub fn f64_unsuffixed(f: f64) -> Literal { let mut s = f.to_string(); - if !s.contains(".") { + if !s.contains('.') { s.push_str(".0"); } Literal::_new(s) @@ -811,6 +855,7 @@ pub fn byte_string(bytes: &[u8]) -> Literal { let mut escaped = "b\"".to_string(); for b in bytes { + #[allow(clippy::match_overlapping_arm)] match *b { b'\0' => escaped.push_str(r"\0"), b'\t' => escaped.push_str(r"\t"), @@ -839,18 +884,17 @@ } } -impl fmt::Display for Literal { +impl Display for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.text.fmt(f) + Display::fmt(&self.text, f) } } -impl fmt::Debug for Literal { +impl Debug for Literal { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut debug = 
fmt.debug_struct("Literal"); debug.field("lit", &format_args!("{}", self.text)); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); + debug_span_field_if_nontrivial(&mut debug, self.span); debug.finish() } } diff -Nru cargo-0.44.1/vendor/proc-macro2/src/lib.rs cargo-0.47.0/vendor/proc-macro2/src/lib.rs --- cargo-0.44.1/vendor/proc-macro2/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -78,24 +78,15 @@ //! a different thread. // Proc-macro2 types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.17")] +#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.24")] #![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))] #![cfg_attr(super_unstable, feature(proc_macro_raw_ident, proc_macro_def_site))] +#![allow(clippy::needless_doctest_main)] #[cfg(use_proc_macro)] extern crate proc_macro; -use std::cmp::Ordering; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::iter::FromIterator; -use std::marker; -use std::ops::RangeBounds; -#[cfg(procmacro2_semver_exempt)] -use std::path::PathBuf; -use std::rc::Rc; -use std::str::FromStr; - +mod marker; mod parse; #[cfg(wrap_proc_macro)] @@ -112,6 +103,17 @@ #[cfg(wrap_proc_macro)] mod imp; +use crate::marker::Marker; +use std::cmp::Ordering; +use std::error::Error; +use std::fmt::{self, Debug, Display}; +use std::hash::{Hash, Hasher}; +use std::iter::FromIterator; +use std::ops::RangeBounds; +#[cfg(procmacro2_semver_exempt)] +use std::path::PathBuf; +use std::str::FromStr; + /// An abstract stream of tokens, or more concretely a sequence of token trees. /// /// This type provides interfaces for iterating over token trees and for @@ -122,27 +124,27 @@ #[derive(Clone)] pub struct TokenStream { inner: imp::TokenStream, - _marker: marker::PhantomData>, + _marker: Marker, } /// Error returned from `TokenStream::from_str`. pub struct LexError { inner: imp::LexError, - _marker: marker::PhantomData>, + _marker: Marker, } impl TokenStream { fn _new(inner: imp::TokenStream) -> TokenStream { TokenStream { inner, - _marker: marker::PhantomData, + _marker: Marker, } } fn _new_stable(inner: fallback::TokenStream) -> TokenStream { TokenStream { inner: inner.into(), - _marker: marker::PhantomData, + _marker: Marker, } } @@ -179,7 +181,7 @@ fn from_str(src: &str) -> Result { let e = src.parse().map_err(|e| LexError { inner: e, - _marker: marker::PhantomData, + _marker: Marker, })?; Ok(TokenStream::_new(e)) } @@ -234,25 +236,33 @@ /// convertible back into the same token stream (modulo spans), except for /// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative /// numeric literals. -impl fmt::Display for TokenStream { +impl Display for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Display::fmt(&self.inner, f) } } /// Prints token in a form convenient for debugging. -impl fmt::Debug for TokenStream { +impl Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl Debug for LexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Debug::fmt(&self.inner, f) } } -impl fmt::Debug for LexError { +impl Display for LexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Display::fmt(&self.inner, f) } } +impl Error for LexError {} + /// The source file of a given `Span`. 
/// /// This type is semver exempt and not exposed by default. @@ -260,7 +270,7 @@ #[derive(Clone, PartialEq, Eq)] pub struct SourceFile { inner: imp::SourceFile, - _marker: marker::PhantomData>, + _marker: Marker, } #[cfg(procmacro2_semver_exempt)] @@ -268,7 +278,7 @@ fn _new(inner: imp::SourceFile) -> Self { SourceFile { inner, - _marker: marker::PhantomData, + _marker: Marker, } } @@ -297,9 +307,9 @@ } #[cfg(procmacro2_semver_exempt)] -impl fmt::Debug for SourceFile { +impl Debug for SourceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Debug::fmt(&self.inner, f) } } @@ -320,7 +330,9 @@ #[cfg(span_locations)] impl Ord for LineColumn { fn cmp(&self, other: &Self) -> Ordering { - self.line.cmp(&other.line).then(self.column.cmp(&other.column)) + self.line + .cmp(&other.line) + .then(self.column.cmp(&other.column)) } } @@ -335,21 +347,21 @@ #[derive(Copy, Clone)] pub struct Span { inner: imp::Span, - _marker: marker::PhantomData>, + _marker: Marker, } impl Span { fn _new(inner: imp::Span) -> Span { Span { inner, - _marker: marker::PhantomData, + _marker: Marker, } } fn _new_stable(inner: fallback::Span) -> Span { Span { inner: inner.into(), - _marker: marker::PhantomData, + _marker: Marker, } } @@ -367,7 +379,6 @@ /// of the macro. This is the same hygiene behavior as `macro_rules`. /// /// This function requires Rust 1.45 or later. - #[cfg(procmacro2_semver_exempt)] #[cfg(hygiene)] pub fn mixed_site() -> Span { Span::_new(imp::Span::mixed_site()) @@ -383,14 +394,12 @@ /// Creates a new span with the same line/column information as `self` but /// that resolves symbols as though it were at `other`. - #[cfg(procmacro2_semver_exempt)] pub fn resolved_at(&self, other: Span) -> Span { Span::_new(self.inner.resolved_at(other.inner)) } /// Creates a new span with the same name resolution behavior as `self` but /// with the line/column information of `other`. - #[cfg(procmacro2_semver_exempt)] pub fn located_at(&self, other: Span) -> Span { Span::_new(self.inner.located_at(other.inner)) } @@ -466,9 +475,9 @@ } /// Prints a span in a form convenient for debugging. -impl fmt::Debug for Span { +impl Debug for Span { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Debug::fmt(&self.inner, f) } } @@ -540,32 +549,32 @@ /// convertible back into the same token tree (modulo spans), except for /// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative /// numeric literals. -impl fmt::Display for TokenTree { +impl Display for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - TokenTree::Group(t) => t.fmt(f), - TokenTree::Ident(t) => t.fmt(f), - TokenTree::Punct(t) => t.fmt(f), - TokenTree::Literal(t) => t.fmt(f), + TokenTree::Group(t) => Display::fmt(t, f), + TokenTree::Ident(t) => Display::fmt(t, f), + TokenTree::Punct(t) => Display::fmt(t, f), + TokenTree::Literal(t) => Display::fmt(t, f), } } } /// Prints token tree in a form convenient for debugging. 
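The `Span` hunks above move `resolved_at`, `located_at`, and (behind the `hygiene` cfg, Rust 1.45+) `mixed_site` out of the `procmacro2_semver_exempt` gate. A typical use this enables in ordinary builds, sketched with a hypothetical helper:

```rust
use proc_macro2::{Ident, Span};

// Create an identifier that resolves at the call site but points error
// messages at `original`, e.g. the span of the user's item in a derive.
fn ident_located_at(name: &str, original: Span) -> Ident {
    Ident::new(name, Span::call_site().located_at(original))
}

fn main() {
    // Outside a real proc macro every span is just call_site(), so this only
    // demonstrates that the API is callable without the semver-exempt cfg.
    let id = ident_located_at("generated_helper", Span::call_site());
    println!("{}", id);
}
```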
-impl fmt::Debug for TokenTree { +impl Debug for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // Each of these has the name in the struct type in the derived debug, // so don't bother with an extra layer of indirection match self { - TokenTree::Group(t) => t.fmt(f), + TokenTree::Group(t) => Debug::fmt(t, f), TokenTree::Ident(t) => { let mut debug = f.debug_struct("Ident"); debug.field("sym", &format_args!("{}", t)); imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner); debug.finish() } - TokenTree::Punct(t) => t.fmt(f), - TokenTree::Literal(t) => t.fmt(f), + TokenTree::Punct(t) => Debug::fmt(t, f), + TokenTree::Literal(t) => Debug::fmt(t, f), } } } @@ -678,15 +687,15 @@ /// Prints the group as a string that should be losslessly convertible back /// into the same group (modulo spans), except for possibly `TokenTree::Group`s /// with `Delimiter::None` delimiters. -impl fmt::Display for Group { +impl Display for Group { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.inner, formatter) + Display::fmt(&self.inner, formatter) } } -impl fmt::Debug for Group { +impl Debug for Group { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.inner, formatter) + Debug::fmt(&self.inner, formatter) } } @@ -696,7 +705,7 @@ /// `Punct` with different forms of `Spacing` returned. #[derive(Clone)] pub struct Punct { - op: char, + ch: char, spacing: Spacing, span: Span, } @@ -722,9 +731,9 @@ /// /// The returned `Punct` will have the default span of `Span::call_site()` /// which can be further configured with the `set_span` method below. - pub fn new(op: char, spacing: Spacing) -> Punct { + pub fn new(ch: char, spacing: Spacing) -> Punct { Punct { - op, + ch, spacing, span: Span::call_site(), } @@ -732,7 +741,7 @@ /// Returns the value of this punctuation character as `char`. pub fn as_char(&self) -> char { - self.op + self.ch } /// Returns the spacing of this punctuation character, indicating whether @@ -757,16 +766,16 @@ /// Prints the punctuation character as a string that should be losslessly /// convertible back into the same character. -impl fmt::Display for Punct { +impl Display for Punct { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.op.fmt(f) + Display::fmt(&self.ch, f) } } -impl fmt::Debug for Punct { +impl Debug for Punct { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut debug = fmt.debug_struct("Punct"); - debug.field("op", &self.op); + debug.field("char", &self.ch); debug.field("spacing", &self.spacing); imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner); debug.finish() @@ -840,14 +849,14 @@ #[derive(Clone)] pub struct Ident { inner: imp::Ident, - _marker: marker::PhantomData>, + _marker: Marker, } impl Ident { fn _new(inner: imp::Ident) -> Ident { Ident { inner, - _marker: marker::PhantomData, + _marker: Marker, } } @@ -947,15 +956,15 @@ /// Prints the identifier as a string that should be losslessly convertible back /// into the same identifier. -impl fmt::Display for Ident { +impl Display for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Display::fmt(&self.inner, f) } } -impl fmt::Debug for Ident { +impl Debug for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Debug::fmt(&self.inner, f) } } @@ -968,7 +977,7 @@ #[derive(Clone)] pub struct Literal { inner: imp::Literal, - _marker: marker::PhantomData>, + _marker: Marker, } macro_rules! 
suffixed_int_literals { @@ -1015,14 +1024,14 @@ fn _new(inner: imp::Literal) -> Literal { Literal { inner, - _marker: marker::PhantomData, + _marker: Marker, } } fn _new_stable(inner: fallback::Literal) -> Literal { Literal { inner: inner.into(), - _marker: marker::PhantomData, + _marker: Marker, } } @@ -1167,24 +1176,23 @@ } } -impl fmt::Debug for Literal { +impl Debug for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Debug::fmt(&self.inner, f) } } -impl fmt::Display for Literal { +impl Display for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Display::fmt(&self.inner, f) } } /// Public implementation details for the `TokenStream` type, such as iterators. pub mod token_stream { + use crate::marker::Marker; use crate::{imp, TokenTree}; - use std::fmt; - use std::marker; - use std::rc::Rc; + use std::fmt::{self, Debug}; pub use crate::TokenStream; @@ -1195,7 +1203,7 @@ #[derive(Clone)] pub struct IntoIter { inner: imp::TokenTreeIter, - _marker: marker::PhantomData>, + _marker: Marker, } impl Iterator for IntoIter { @@ -1206,9 +1214,9 @@ } } - impl fmt::Debug for IntoIter { + impl Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) + Debug::fmt(&self.inner, f) } } @@ -1219,7 +1227,7 @@ fn into_iter(self) -> IntoIter { IntoIter { inner: self.inner.into_iter(), - _marker: marker::PhantomData, + _marker: Marker, } } } diff -Nru cargo-0.44.1/vendor/proc-macro2/src/marker.rs cargo-0.47.0/vendor/proc-macro2/src/marker.rs --- cargo-0.44.1/vendor/proc-macro2/src/marker.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/src/marker.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,18 @@ +use std::marker::PhantomData; +use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::rc::Rc; + +// Zero sized marker with the correct set of autotrait impls we want all proc +// macro types to have. 
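The new `marker.rs` module relies on `PhantomData<T>` adopting the auto traits of `T`: holding an `Rc<()>` makes the marker neither `Send` nor `Sync`, while the explicit `UnwindSafe`/`RefUnwindSafe` impls opt back in to unwind safety, and the wrapper types above now embed this single zero-sized `Marker` instead of writing out the `PhantomData` type at every use site. A compact standalone sketch of the technique (type names here are illustrative, not the crate's):

    #![allow(dead_code)]

    use std::marker::PhantomData;
    use std::panic::{RefUnwindSafe, UnwindSafe};
    use std::rc::Rc;

    // Rc<()> is neither Send nor Sync, so any type containing it loses both.
    struct AutoTraits(Rc<()>);

    // Opt back in to unwind safety, as the new module does for its own marker.
    impl UnwindSafe for AutoTraits {}
    impl RefUnwindSafe for AutoTraits {}

    // Zero-sized field type: PhantomData<AutoTraits> mirrors AutoTraits' auto traits.
    type Marker = PhantomData<AutoTraits>;

    struct Token {
        _marker: Marker,
    }

    fn require_unwind_safe<T: UnwindSafe + RefUnwindSafe>() {}
    // fn require_send<T: Send>() {} // require_send::<Token>() would fail to compile

    fn main() {
        require_unwind_safe::<Token>();
    }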
+pub(crate) type Marker = PhantomData; + +pub(crate) use self::value::*; + +mod value { + pub(crate) use std::marker::PhantomData as Marker; +} + +pub(crate) struct ProcMacroAutoTraits(Rc<()>); + +impl UnwindSafe for ProcMacroAutoTraits {} +impl RefUnwindSafe for ProcMacroAutoTraits {} diff -Nru cargo-0.44.1/vendor/proc-macro2/src/parse.rs cargo-0.47.0/vendor/proc-macro2/src/parse.rs --- cargo-0.44.1/vendor/proc-macro2/src/parse.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/src/parse.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2,8 +2,8 @@ is_ident_continue, is_ident_start, Group, LexError, Literal, Span, TokenStream, }; use crate::{Delimiter, Punct, Spacing, TokenTree}; +use std::char; use std::str::{Bytes, CharIndices, Chars}; -use unicode_xid::UnicodeXID; #[derive(Copy, Clone, Eq, PartialEq)] pub(crate) struct Cursor<'a> { @@ -143,7 +143,7 @@ fn word_break(input: Cursor) -> Result { match input.chars().next() { - Some(ch) if UnicodeXID::is_xid_continue(ch) => Err(LexError), + Some(ch) if is_ident_continue(ch) => Err(LexError), Some(_) | None => Ok(input), } } @@ -228,7 +228,7 @@ if let Ok((input, l)) = literal(input) { // must be parsed before ident Ok((input, TokenTree::Literal(crate::Literal::_new_stable(l)))) - } else if let Ok((input, p)) = op(input) { + } else if let Ok((input, p)) = punct(input) { Ok((input, TokenTree::Punct(p))) } else if let Ok((input, i)) = ident(input) { Ok((input, TokenTree::Ident(i))) @@ -238,6 +238,17 @@ } fn ident(input: Cursor) -> PResult { + if ["r\"", "r#\"", "r##", "b\"", "b\'", "br\"", "br#"] + .iter() + .any(|prefix| input.starts_with(prefix)) + { + Err(LexError) + } else { + ident_any(input) + } +} + +fn ident_any(input: Cursor) -> PResult { let raw = input.starts_with("r#"); let rest = input.advance((raw as usize) << 1); @@ -329,13 +340,10 @@ let input = input.advance(i + 1); return Ok(literal_suffix(input)); } - '\r' => { - if let Some((_, '\n')) = chars.next() { - // ... - } else { - break; - } - } + '\r' => match chars.next() { + Some((_, '\n')) => {} + _ => break, + }, '\\' => match chars.next() { Some((_, 'x')) => { if !backslash_x_char(&mut chars) { @@ -349,12 +357,18 @@ break; } } - Some((_, '\n')) | Some((_, '\r')) => { - while let Some(&(_, ch)) = chars.peek() { - if ch.is_whitespace() { - chars.next(); - } else { - break; + Some((_, ch @ '\n')) | Some((_, ch @ '\r')) => { + let mut last = ch; + loop { + if last == '\r' && chars.next().map_or(true, |(_, ch)| ch != '\n') { + return Err(LexError); + } + match chars.peek() { + Some((_, ch)) if ch.is_whitespace() => { + last = *ch; + chars.next(); + } + _ => break, } } } @@ -378,19 +392,16 @@ fn cooked_byte_string(mut input: Cursor) -> Result { let mut bytes = input.bytes().enumerate(); - 'outer: while let Some((offset, b)) = bytes.next() { + while let Some((offset, b)) = bytes.next() { match b { b'"' => { let input = input.advance(offset + 1); return Ok(literal_suffix(input)); } - b'\r' => { - if let Some((_, b'\n')) = bytes.next() { - // ... 
- } else { - break; - } - } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, b'\\' => match bytes.next() { Some((_, b'x')) => { if !backslash_x_byte(&mut bytes) { @@ -399,16 +410,24 @@ } Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\')) | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {} - Some((newline, b'\n')) | Some((newline, b'\r')) => { + Some((newline, b @ b'\n')) | Some((newline, b @ b'\r')) => { + let mut last = b as char; let rest = input.advance(newline + 1); - for (offset, ch) in rest.char_indices() { - if !ch.is_whitespace() { - input = rest.advance(offset); - bytes = input.bytes().enumerate(); - continue 'outer; + let mut chars = rest.char_indices(); + loop { + if last == '\r' && chars.next().map_or(true, |(_, ch)| ch != '\n') { + return Err(LexError); + } + match chars.next() { + Some((_, ch)) if ch.is_whitespace() => last = ch, + Some((offset, _)) => { + input = rest.advance(offset); + bytes = input.bytes().enumerate(); + break; + } + None => return Err(LexError), } } - break; } _ => break, }, @@ -432,13 +451,16 @@ _ => return Err(LexError), } } - for (i, ch) in chars { + while let Some((i, ch)) = chars.next() { match ch { '"' if input.rest[i + 1..].starts_with(&input.rest[..n]) => { let rest = input.advance(i + 1 + n); return Ok(literal_suffix(rest)); } - '\r' => {} + '\r' => match chars.next() { + Some((_, '\n')) => {} + _ => break, + }, _ => {} } } @@ -525,13 +547,25 @@ I: Iterator, { next_ch!(chars @ '{'); - next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); - loop { - let c = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F' | '_' | '}'); - if c == '}' { - return true; + let mut value = 0; + let mut len = 0; + while let Some((_, ch)) = chars.next() { + let digit = match ch { + '0'..='9' => ch as u8 - b'0', + 'a'..='f' => 10 + ch as u8 - b'a', + 'A'..='F' => 10 + ch as u8 - b'A', + '_' if len > 0 => continue, + '}' if len > 0 => return char::from_u32(value).is_some(), + _ => return false, + }; + if len == 6 { + return false; } + value *= 0x10; + value += u32::from(digit); + len += 1; } + false } fn float(input: Cursor) -> Result { @@ -585,12 +619,17 @@ } } - let rest = input.advance(len); - if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) { + if !(has_dot || has_exp) { return Err(LexError); } if has_exp { + let token_before_exp = if has_dot { + Ok(input.advance(len - 1)) + } else { + Err(LexError) + }; + let mut has_sign = false; let mut has_exp_value = false; while let Some(&ch) = chars.peek() { match ch { @@ -598,8 +637,12 @@ if has_exp_value { break; } + if has_sign { + return token_before_exp; + } chars.next(); len += 1; + has_sign = true; } '0'..='9' => { chars.next(); @@ -614,7 +657,7 @@ } } if !has_exp_value { - return Err(LexError); + return token_before_exp; } } @@ -648,10 +691,25 @@ let mut len = 0; let mut empty = true; for b in input.bytes() { - let digit = match b { - b'0'..=b'9' => (b - b'0') as u64, - b'a'..=b'f' => 10 + (b - b'a') as u64, - b'A'..=b'F' => 10 + (b - b'A') as u64, + match b { + b'0'..=b'9' => { + let digit = (b - b'0') as u64; + if digit >= base { + return Err(LexError); + } + } + b'a'..=b'f' => { + let digit = 10 + (b - b'a') as u64; + if digit >= base { + break; + } + } + b'A'..=b'F' => { + let digit = 10 + (b - b'A') as u64; + if digit >= base { + break; + } + } b'_' => { if empty && base == 10 { return Err(LexError); @@ -661,9 +719,6 @@ } _ => break, }; - if digit >= base { - return Err(LexError); - } len += 1; empty = false; } @@ -674,14 +729,17 
@@ } } -fn op(input: Cursor) -> PResult { - match op_char(input) { +fn punct(input: Cursor) -> PResult { + match punct_char(input) { Ok((rest, '\'')) => { - ident(rest)?; - Ok((rest, Punct::new('\'', Spacing::Joint))) + if ident_any(rest)?.0.starts_with("'") { + Err(LexError) + } else { + Ok((rest, Punct::new('\'', Spacing::Joint))) + } } Ok((rest, ch)) => { - let kind = match op_char(rest) { + let kind = match punct_char(rest) { Ok(_) => Spacing::Joint, Err(LexError) => Spacing::Alone, }; @@ -691,9 +749,9 @@ } } -fn op_char(input: Cursor) -> PResult { +fn punct_char(input: Cursor) -> PResult { if input.starts_with("//") || input.starts_with("/*") { - // Do not accept `/` of a comment as an op. + // Do not accept `/` of a comment as a punct. return Err(LexError); } @@ -768,7 +826,7 @@ } let (input, s) = take_until_newline_or_eof(input); Ok((input, (s, false))) - } else if input.starts_with("/**") && !input.rest[3..].starts_with("*") { + } else if input.starts_with("/**") && !input.rest[3..].starts_with('*') { let (input, s) = block_comment(input)?; Ok((input, (&s[3..s.len() - 2], false))) } else { @@ -777,9 +835,9 @@ } fn take_until_newline_or_eof(input: Cursor) -> (Cursor, &str) { - let mut chars = input.char_indices(); + let chars = input.char_indices(); - while let Some((i, ch)) = chars.next() { + for (i, ch) in chars { if ch == '\n' { return (input.advance(i), &input.rest[..i]); } else if ch == '\r' && input.rest[i + 1..].starts_with('\n') { diff -Nru cargo-0.44.1/vendor/proc-macro2/src/wrapper.rs cargo-0.47.0/vendor/proc-macro2/src/wrapper.rs --- cargo-0.44.1/vendor/proc-macro2/src/wrapper.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/src/wrapper.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ use crate::detection::inside_proc_macro; use crate::{fallback, Delimiter, Punct, Spacing, TokenTree}; -use std::fmt; -use std::iter; +use std::fmt::{self, Debug, Display}; +use std::iter::FromIterator; use std::ops::RangeBounds; use std::panic; #[cfg(super_unstable)] @@ -46,7 +46,12 @@ } fn evaluate_now(&mut self) { - self.stream.extend(self.extra.drain(..)); + // If-check provides a fast short circuit for the common case of `extra` + // being empty, which saves a round trip over the proc macro bridge. + // Improves macro expansion time in winrt by 6% in debug mode. 
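The comment above gives the reasoning: flushing `extra` into the compiler's token stream costs a round trip over the proc macro bridge, so an empty flush is pure overhead. A simplified sketch of the same defer-and-flush pattern (`Deferred` and the `Vec<String>` stand-in are illustrative, not the crate's types):

    // Cheap pushes accumulate locally; the expensive sink is only touched
    // when there is something to flush.
    struct Deferred {
        stream: Vec<String>, // stand-in for the costly proc_macro::TokenStream
        extra: Vec<String>,  // local buffer that is cheap to grow
    }

    impl Deferred {
        fn push(&mut self, token: String) {
            self.extra.push(token);
        }

        fn evaluate_now(&mut self) {
            // Mirrors the short circuit added above: skip the flush entirely
            // when nothing was buffered.
            if !self.extra.is_empty() {
                self.stream.extend(self.extra.drain(..));
            }
        }
    }

    fn main() {
        let mut deferred = Deferred { stream: Vec::new(), extra: Vec::new() };
        deferred.evaluate_now(); // no-op: nothing buffered
        deferred.push("ident".to_owned());
        deferred.evaluate_now();
        assert_eq!(deferred.stream, ["ident"]);
    }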
+ if !self.extra.is_empty() { + self.stream.extend(self.extra.drain(..)); + } } fn into_token_stream(mut self) -> proc_macro::TokenStream { @@ -106,11 +111,11 @@ .unwrap_or(Err(LexError::Fallback(fallback::LexError))) } -impl fmt::Display for TokenStream { +impl Display for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - TokenStream::Compiler(tts) => tts.clone().into_token_stream().fmt(f), - TokenStream::Fallback(tts) => tts.fmt(f), + TokenStream::Compiler(tts) => Display::fmt(&tts.clone().into_token_stream(), f), + TokenStream::Fallback(tts) => Display::fmt(tts, f), } } } @@ -145,9 +150,9 @@ Spacing::Joint => proc_macro::Spacing::Joint, Spacing::Alone => proc_macro::Spacing::Alone, }; - let mut op = proc_macro::Punct::new(tt.as_char(), spacing); - op.set_span(tt.span().inner.unwrap_nightly()); - op.into() + let mut punct = proc_macro::Punct::new(tt.as_char(), spacing); + punct.set_span(tt.span().inner.unwrap_nightly()); + punct.into() } TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(), TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(), @@ -164,7 +169,7 @@ } } -impl iter::FromIterator for TokenStream { +impl FromIterator for TokenStream { fn from_iter>(trees: I) -> Self { if inside_proc_macro() { TokenStream::Compiler(DeferredTokenStream::new( @@ -176,7 +181,7 @@ } } -impl iter::FromIterator for TokenStream { +impl FromIterator for TokenStream { fn from_iter>(streams: I) -> Self { let mut streams = streams.into_iter(); match streams.next() { @@ -201,14 +206,15 @@ } impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { + fn extend>(&mut self, stream: I) { match self { TokenStream::Compiler(tts) => { // Here is the reason for DeferredTokenStream. - tts.extra - .extend(streams.into_iter().map(into_compiler_token)); + for token in stream { + tts.extra.push(into_compiler_token(token)); + } } - TokenStream::Fallback(tts) => tts.extend(streams), + TokenStream::Fallback(tts) => tts.extend(stream), } } } @@ -219,20 +225,20 @@ TokenStream::Compiler(tts) => { tts.evaluate_now(); tts.stream - .extend(streams.into_iter().map(|stream| stream.unwrap_nightly())); + .extend(streams.into_iter().map(TokenStream::unwrap_nightly)); } TokenStream::Fallback(tts) => { - tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable())); + tts.extend(streams.into_iter().map(TokenStream::unwrap_stable)); } } } } -impl fmt::Debug for TokenStream { +impl Debug for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - TokenStream::Compiler(tts) => tts.clone().into_token_stream().fmt(f), - TokenStream::Fallback(tts) => tts.fmt(f), + TokenStream::Compiler(tts) => Debug::fmt(&tts.clone().into_token_stream(), f), + TokenStream::Fallback(tts) => Debug::fmt(tts, f), } } } @@ -249,11 +255,23 @@ } } -impl fmt::Debug for LexError { +impl Debug for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + LexError::Compiler(e) => Debug::fmt(e, f), + LexError::Fallback(e) => Debug::fmt(e, f), + } + } +} + +impl Display for LexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - LexError::Compiler(e) => e.fmt(f), - LexError::Fallback(e) => e.fmt(f), + #[cfg(lexerror_display)] + LexError::Compiler(e) => Display::fmt(e, f), + #[cfg(not(lexerror_display))] + LexError::Compiler(_e) => Display::fmt(&fallback::LexError, f), + LexError::Fallback(e) => Display::fmt(e, f), } } } @@ -310,7 +328,7 @@ } } -impl fmt::Debug for TokenTreeIter { +impl Debug for TokenTreeIter { fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { f.debug_struct("TokenTreeIter").finish() } @@ -346,11 +364,11 @@ } #[cfg(super_unstable)] -impl fmt::Debug for SourceFile { +impl Debug for SourceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - SourceFile::Compiler(a) => a.fmt(f), - SourceFile::Fallback(a) => a.fmt(f), + SourceFile::Compiler(a) => Debug::fmt(a, f), + SourceFile::Fallback(a) => Debug::fmt(a, f), } } } @@ -376,7 +394,6 @@ } } - #[cfg(procmacro2_semver_exempt)] #[cfg(hygiene)] pub fn mixed_site() -> Span { if inside_proc_macro() { @@ -395,7 +412,6 @@ } } - #[cfg(procmacro2_semver_exempt)] pub fn resolved_at(&self, other: Span) -> Span { match (self, other) { #[cfg(hygiene)] @@ -410,7 +426,6 @@ } } - #[cfg(procmacro2_semver_exempt)] pub fn located_at(&self, other: Span) -> Span { match (self, other) { #[cfg(hygiene)] @@ -513,11 +528,11 @@ } } -impl fmt::Debug for Span { +impl Debug for Span { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Span::Compiler(s) => s.fmt(f), - Span::Fallback(s) => s.fmt(f), + Span::Compiler(s) => Debug::fmt(s, f), + Span::Fallback(s) => Debug::fmt(s, f), } } } @@ -623,20 +638,20 @@ } } -impl fmt::Display for Group { +impl Display for Group { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { match self { - Group::Compiler(group) => group.fmt(formatter), - Group::Fallback(group) => group.fmt(formatter), + Group::Compiler(group) => Display::fmt(group, formatter), + Group::Fallback(group) => Display::fmt(group, formatter), } } } -impl fmt::Debug for Group { +impl Debug for Group { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { match self { - Group::Compiler(group) => group.fmt(formatter), - Group::Fallback(group) => group.fmt(formatter), + Group::Compiler(group) => Debug::fmt(group, formatter), + Group::Fallback(group) => Debug::fmt(group, formatter), } } } @@ -718,20 +733,20 @@ } } -impl fmt::Display for Ident { +impl Display for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Ident::Compiler(t) => t.fmt(f), - Ident::Fallback(t) => t.fmt(f), + Ident::Compiler(t) => Display::fmt(t, f), + Ident::Fallback(t) => Display::fmt(t, f), } } } -impl fmt::Debug for Ident { +impl Debug for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Ident::Compiler(t) => t.fmt(f), - Ident::Fallback(t) => t.fmt(f), + Ident::Compiler(t) => Debug::fmt(t, f), + Ident::Fallback(t) => Debug::fmt(t, f), } } } @@ -879,20 +894,20 @@ } } -impl fmt::Display for Literal { +impl Display for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Literal::Compiler(t) => t.fmt(f), - Literal::Fallback(t) => t.fmt(f), + Literal::Compiler(t) => Display::fmt(t, f), + Literal::Fallback(t) => Display::fmt(t, f), } } } -impl fmt::Debug for Literal { +impl Debug for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Literal::Compiler(t) => t.fmt(f), - Literal::Fallback(t) => t.fmt(f), + Literal::Compiler(t) => Debug::fmt(t, f), + Literal::Fallback(t) => Debug::fmt(t, f), } } } diff -Nru cargo-0.44.1/vendor/proc-macro2/tests/marker.rs cargo-0.47.0/vendor/proc-macro2/tests/marker.rs --- cargo-0.44.1/vendor/proc-macro2/tests/marker.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/tests/marker.rs 2020-10-01 21:38:28.000000000 +0000 @@ -57,3 +57,36 @@ assert_impl!(SourceFile is not Send or Sync); } + +#[cfg(not(no_libprocmacro_unwind_safe))] +mod unwind_safe { + use super::*; + use 
std::panic::{RefUnwindSafe, UnwindSafe}; + + macro_rules! assert_unwind_safe { + ($($types:ident)*) => { + $( + assert_impl!($types is UnwindSafe and RefUnwindSafe); + )* + }; + } + + assert_unwind_safe! { + Delimiter + Group + Ident + LexError + Literal + Punct + Spacing + Span + TokenStream + TokenTree + } + + #[cfg(procmacro2_semver_exempt)] + assert_unwind_safe! { + LineColumn + SourceFile + } +} diff -Nru cargo-0.44.1/vendor/proc-macro2/tests/test_fmt.rs cargo-0.47.0/vendor/proc-macro2/tests/test_fmt.rs --- cargo-0.44.1/vendor/proc-macro2/tests/test_fmt.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/tests/test_fmt.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,26 @@ +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; +use std::iter::{self, FromIterator}; + +#[test] +fn test_fmt_group() { + let ident = Ident::new("x", Span::call_site()); + let inner = TokenStream::from_iter(iter::once(TokenTree::Ident(ident))); + let parens_empty = Group::new(Delimiter::Parenthesis, TokenStream::new()); + let parens_nonempty = Group::new(Delimiter::Parenthesis, inner.clone()); + let brackets_empty = Group::new(Delimiter::Bracket, TokenStream::new()); + let brackets_nonempty = Group::new(Delimiter::Bracket, inner.clone()); + let braces_empty = Group::new(Delimiter::Brace, TokenStream::new()); + let braces_nonempty = Group::new(Delimiter::Brace, inner.clone()); + let none_empty = Group::new(Delimiter::None, TokenStream::new()); + let none_nonempty = Group::new(Delimiter::None, inner.clone()); + + // Matches libproc_macro. + assert_eq!("()", parens_empty.to_string()); + assert_eq!("(x)", parens_nonempty.to_string()); + assert_eq!("[]", brackets_empty.to_string()); + assert_eq!("[x]", brackets_nonempty.to_string()); + assert_eq!("{ }", braces_empty.to_string()); + assert_eq!("{ x }", braces_nonempty.to_string()); + assert_eq!("", none_empty.to_string()); + assert_eq!("x", none_nonempty.to_string()); +} diff -Nru cargo-0.44.1/vendor/proc-macro2/tests/test.rs cargo-0.47.0/vendor/proc-macro2/tests/test.rs --- cargo-0.44.1/vendor/proc-macro2/tests/test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/proc-macro2/tests/test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use proc_macro2::{Ident, Literal, Spacing, Span, TokenStream, TokenTree}; +use proc_macro2::{Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; use std::str::{self, FromStr}; #[test] @@ -84,6 +84,11 @@ } #[test] +fn literal_raw_string() { + "r\"\r\n\"".parse::().unwrap(); +} + +#[test] fn literal_character() { assert_eq!(Literal::character('x').to_string(), "'x'"); assert_eq!(Literal::character('\'').to_string(), "'\\''"); @@ -115,6 +120,31 @@ assert_eq!(token_count("r#\"\"#r"), 1); assert_eq!(token_count("'c'c"), 1); assert_eq!(token_count("b'b'b"), 1); + assert_eq!(token_count("0E"), 1); + assert_eq!(token_count("0o0A"), 1); + assert_eq!(token_count("0E--0"), 4); + assert_eq!(token_count("0.0ECMA"), 1); +} + +#[test] +fn literal_iter_negative() { + let negative_literal = Literal::i32_suffixed(-3); + let tokens = TokenStream::from(TokenTree::Literal(negative_literal)); + let mut iter = tokens.into_iter(); + match iter.next().unwrap() { + TokenTree::Punct(punct) => { + assert_eq!(punct.as_char(), '-'); + assert_eq!(punct.spacing(), Spacing::Alone); + } + unexpected => panic!("unexpected token {:?}", unexpected), + } + match iter.next().unwrap() { + TokenTree::Literal(literal) => { + assert_eq!(literal.to_string(), "3i32"); + } + unexpected => 
panic!("unexpected token {:?}", unexpected), + } + assert!(iter.next().is_none()); } #[test] @@ -166,6 +196,16 @@ fail("' static"); fail("r#1"); fail("r#_"); + fail("\"\\u{0000000}\""); // overlong unicode escape (rust allows at most 6 hex digits) + fail("\"\\u{999999}\""); // outside of valid range of char + fail("\"\\u{_0}\""); // leading underscore + fail("\"\\u{}\""); // empty + fail("b\"\r\""); // bare carriage return in byte string + fail("r\"\r\""); // bare carriage return in raw string + fail("\"\\\r \""); // backslash carriage return + fail("'aa'aa"); + fail("br##\"\"#"); + fail("\"\\\n\u{85}\r\""); } #[cfg(span_locations)] @@ -253,7 +293,7 @@ } #[test] -fn op_before_comment() { +fn punct_before_comment() { let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter(); match tts.next().unwrap() { TokenTree::Punct(tt) => { @@ -265,6 +305,22 @@ } #[test] +fn joint_last_token() { + // This test verifies that we match the behavior of libproc_macro *not* in + // the range nightly-2020-09-06 through nightly-2020-09-10, in which this + // behavior was temporarily broken. + // See https://github.com/rust-lang/rust/issues/76399 + + let joint_punct = Punct::new(':', Spacing::Joint); + let stream = TokenStream::from(TokenTree::Punct(joint_punct)); + let punct = match stream.into_iter().next().unwrap() { + TokenTree::Punct(punct) => punct, + _ => unreachable!(), + }; + assert_eq!(punct.spacing(), Spacing::Joint); +} + +#[test] fn raw_identifier() { let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter(); match tts.next().unwrap() { @@ -278,11 +334,11 @@ fn test_debug_ident() { let ident = Ident::new("proc_macro", Span::call_site()); - #[cfg(not(procmacro2_semver_exempt))] + #[cfg(not(span_locations))] let expected = "Ident(proc_macro)"; - #[cfg(procmacro2_semver_exempt)] - let expected = "Ident { sym: proc_macro, span: bytes(0..0) }"; + #[cfg(span_locations)] + let expected = "Ident { sym: proc_macro }"; assert_eq!(expected, format!("{:?}", ident)); } @@ -291,7 +347,7 @@ fn test_debug_tokenstream() { let tts = TokenStream::from_str("[a + 1]").unwrap(); - #[cfg(not(procmacro2_semver_exempt))] + #[cfg(not(span_locations))] let expected = "\ TokenStream [ Group { @@ -301,7 +357,7 @@ sym: a, }, Punct { - op: '+', + char: '+', spacing: Alone, }, Literal { @@ -312,7 +368,7 @@ ]\ "; - #[cfg(not(procmacro2_semver_exempt))] + #[cfg(not(span_locations))] let expected_before_trailing_commas = "\ TokenStream [ Group { @@ -322,7 +378,7 @@ sym: a }, Punct { - op: '+', + char: '+', spacing: Alone }, Literal { @@ -333,7 +389,7 @@ ]\ "; - #[cfg(procmacro2_semver_exempt)] + #[cfg(span_locations)] let expected = "\ TokenStream [ Group { @@ -344,7 +400,7 @@ span: bytes(2..3), }, Punct { - op: '+', + char: '+', spacing: Alone, span: bytes(4..5), }, @@ -358,7 +414,7 @@ ]\ "; - #[cfg(procmacro2_semver_exempt)] + #[cfg(span_locations)] let expected_before_trailing_commas = "\ TokenStream [ Group { @@ -369,7 +425,7 @@ span: bytes(2..3) }, Punct { - op: '+', + char: '+', spacing: Alone, span: bytes(4..5) }, diff -Nru cargo-0.44.1/vendor/quote/.cargo-checksum.json cargo-0.47.0/vendor/quote/.cargo-checksum.json --- cargo-0.44.1/vendor/quote/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/quote/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea"} \ No newline at end of file +{"files":{},"package":"aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"} 
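The new rejection cases above (`\u{0000000}`, `\u{999999}`, `\u{_0}`, `\u{}`) exercise the rewritten `backslash_u` check in parse.rs, which now decodes the escape instead of merely matching hex digits. A standalone sketch of those rules (the function name is illustrative): one to six hex digits, underscores only after a digit, and the decoded value must be a valid `char`.

    fn valid_unicode_escape(body: &str) -> bool {
        // `body` is the text between the braces of \u{...}
        let mut value: u32 = 0;
        let mut len = 0;
        for ch in body.chars() {
            let digit = match ch {
                '0'..='9' => ch as u32 - '0' as u32,
                'a'..='f' => 10 + ch as u32 - 'a' as u32,
                'A'..='F' => 10 + ch as u32 - 'A' as u32,
                '_' if len > 0 => continue, // separators allowed after a digit
                _ => return false,
            };
            if len == 6 {
                return false; // more than six hex digits
            }
            value = value * 0x10 + digit;
            len += 1;
        }
        len > 0 && std::char::from_u32(value).is_some()
    }

    fn main() {
        assert!(valid_unicode_escape("1F600"));    // a valid scalar value
        assert!(!valid_unicode_escape("0000000")); // overlong: seven digits
        assert!(!valid_unicode_escape("999999"));  // outside the char range
        assert!(!valid_unicode_escape("_0"));      // leading underscore
        assert!(!valid_unicode_escape(""));        // empty escape
    }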
\ No newline at end of file diff -Nru cargo-0.44.1/vendor/quote/Cargo.toml cargo-0.47.0/vendor/quote/Cargo.toml --- cargo-0.44.1/vendor/quote/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/quote/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "quote" -version = "1.0.6" +version = "1.0.7" authors = ["David Tolnay "] include = ["Cargo.toml", "src/**/*.rs", "tests/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] description = "Quasi-quoting macro quote!(...)" diff -Nru cargo-0.44.1/vendor/quote/src/lib.rs cargo-0.47.0/vendor/quote/src/lib.rs --- cargo-0.44.1/vendor/quote/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/quote/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -81,9 +81,8 @@ //! ``` #![forbid(unsafe_code)] - // Quote types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/quote/1.0.6")] +#![doc(html_root_url = "https://docs.rs/quote/1.0.7")] #[cfg(all( not(all(target_arch = "wasm32", target_os = "unknown")), @@ -472,9 +471,14 @@ /// ``` #[macro_export] macro_rules! quote { - ($($tt:tt)*) => { - $crate::quote_spanned!($crate::__private::Span::call_site()=> $($tt)*) + () => { + $crate::__private::TokenStream::new() }; + ($($tt:tt)*) => {{ + let mut _s = $crate::__private::TokenStream::new(); + $crate::quote_each_token!(_s $($tt)*); + _s + }}; } /// Same as `quote!`, but applies a given span to all tokens originating within @@ -575,10 +579,14 @@ /// placed appropriately by the compiler. #[macro_export] macro_rules! quote_spanned { + ($span:expr=>) => {{ + let _: $crate::__private::Span = $span; + $crate::__private::TokenStream::new() + }}; ($span:expr=> $($tt:tt)*) => {{ let mut _s = $crate::__private::TokenStream::new(); let _span: $crate::__private::Span = $span; - $crate::quote_each_token!(_s _span $($tt)*); + $crate::quote_each_token_spanned!(_s _span $($tt)*); _s }}; } @@ -657,8 +665,24 @@ #[macro_export] #[doc(hidden)] macro_rules! quote_each_token { + ($tokens:ident $($tts:tt)*) => { + $crate::quote_tokens_with_context!($tokens + (@ @ @ @ @ @ $($tts)*) + (@ @ @ @ @ $($tts)* @) + (@ @ @ @ $($tts)* @ @) + (@ @ @ $(($tts))* @ @ @) + (@ @ $($tts)* @ @ @ @) + (@ $($tts)* @ @ @ @ @) + ($($tts)* @ @ @ @ @ @) + ); + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! quote_each_token_spanned { ($tokens:ident $span:ident $($tts:tt)*) => { - $crate::quote_tokens_with_context!($tokens $span + $crate::quote_tokens_with_context_spanned!($tokens $span (@ @ @ @ @ @ $($tts)*) (@ @ @ @ @ $($tts)* @) (@ @ @ @ $($tts)* @ @) @@ -673,13 +697,27 @@ #[macro_export] #[doc(hidden)] macro_rules! quote_tokens_with_context { + ($tokens:ident + ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) + ($($curr:tt)*) + ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) + ) => { + $( + $crate::quote_token_with_context!($tokens $b3 $b2 $b1 $curr $a1 $a2 $a3); + )* + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! quote_tokens_with_context_spanned { ($tokens:ident $span:ident ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) ($($curr:tt)*) ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) ) => { $( - $crate::quote_token_with_context!($tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3); + $crate::quote_token_with_context_spanned!($tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3); )* }; } @@ -687,6 +725,62 @@ #[macro_export] #[doc(hidden)] macro_rules! 
quote_token_with_context { + ($tokens:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ + use $crate::__private::ext::*; + let has_iter = $crate::__private::ThereIsNoIteratorInRepetition; + $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*); + let _: $crate::__private::HasIterator = has_iter; + // This is `while true` instead of `loop` because if there are no + // iterators used inside of this repetition then the body would not + // contain any `break`, so the compiler would emit unreachable code + // warnings on anything below the loop. We use has_iter to detect and + // fail to compile when there are no iterators, so here we just work + // around the unneeded extra warning. + while true { + $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*); + $crate::quote_each_token!($tokens $($inner)*); + } + }}; + ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; + ($tokens:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{ + use $crate::__private::ext::*; + let mut _i = 0usize; + let has_iter = $crate::__private::ThereIsNoIteratorInRepetition; + $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*); + let _: $crate::__private::HasIterator = has_iter; + while true { + $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*); + if _i > 0 { + $crate::quote_token!($tokens $sep); + } + _i += 1; + $crate::quote_each_token!($tokens $($inner)*); + } + }}; + ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; + ($tokens:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; + ($tokens:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { + // https://github.com/dtolnay/quote/issues/130 + $crate::quote_token!($tokens *); + }; + ($tokens:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => { + $crate::ToTokens::to_tokens(&$var, &mut $tokens); + }; + ($tokens:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {}; + ($tokens:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { + $crate::quote_token!($tokens $curr); + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! quote_token_with_context_spanned { ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ @@ -702,7 +796,7 @@ // around the unneeded extra warning. 
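The unspanned rules above implement the `#(...)*` and `#(...)sep*` repetition forms for the plain `quote!` path, which no longer threads a span argument through every token. A usage sketch, assuming `proc-macro2` and `quote` as dependencies (proc-macro2's fallback lets this run in an ordinary binary, outside a proc macro):

    use proc_macro2::{Ident, Span};
    use quote::quote;

    fn main() {
        let fields: Vec<Ident> = ["a", "b", "c"]
            .iter()
            .copied()
            .map(|name| Ident::new(name, Span::call_site()))
            .collect();

        // `#( ... ),*` repeats the inner tokens once per element of `fields`,
        // inserting `,` separators; the has_iter binding in the rules above is
        // what rejects a repetition that interpolates no iterator at all.
        let tokens = quote! {
            struct Generated { #( #fields: u32 ),* }
        };

        println!("{}", tokens);
    }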
while true { $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*); - $crate::quote_each_token!($tokens $span $($inner)*); + $crate::quote_each_token_spanned!($tokens $span $($inner)*); } }}; ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; @@ -717,17 +811,17 @@ while true { $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*); if _i > 0 { - $crate::quote_token!($tokens $span $sep); + $crate::quote_token_spanned!($tokens $span $sep); } _i += 1; - $crate::quote_each_token!($tokens $span $($inner)*); + $crate::quote_each_token_spanned!($tokens $span $($inner)*); } }}; ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; ($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { // https://github.com/dtolnay/quote/issues/130 - $crate::quote_token!($tokens $span *); + $crate::quote_token_spanned!($tokens $span *); }; ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; @@ -736,227 +830,433 @@ }; ($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {}; ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { - $crate::quote_token!($tokens $span $curr); + $crate::quote_token_spanned!($tokens $span $curr); }; } #[macro_export] #[doc(hidden)] macro_rules! quote_token { + ($tokens:ident ( $($inner:tt)* )) => { + $crate::__private::push_group( + &mut $tokens, + $crate::__private::Delimiter::Parenthesis, + $crate::quote!($($inner)*), + ); + }; + + ($tokens:ident [ $($inner:tt)* ]) => { + $crate::__private::push_group( + &mut $tokens, + $crate::__private::Delimiter::Bracket, + $crate::quote!($($inner)*), + ); + }; + + ($tokens:ident { $($inner:tt)* }) => { + $crate::__private::push_group( + &mut $tokens, + $crate::__private::Delimiter::Brace, + $crate::quote!($($inner)*), + ); + }; + + ($tokens:ident +) => { + $crate::__private::push_add(&mut $tokens); + }; + + ($tokens:ident +=) => { + $crate::__private::push_add_eq(&mut $tokens); + }; + + ($tokens:ident &) => { + $crate::__private::push_and(&mut $tokens); + }; + + ($tokens:ident &&) => { + $crate::__private::push_and_and(&mut $tokens); + }; + + ($tokens:ident &=) => { + $crate::__private::push_and_eq(&mut $tokens); + }; + + ($tokens:ident @) => { + $crate::__private::push_at(&mut $tokens); + }; + + ($tokens:ident !) => { + $crate::__private::push_bang(&mut $tokens); + }; + + ($tokens:ident ^) => { + $crate::__private::push_caret(&mut $tokens); + }; + + ($tokens:ident ^=) => { + $crate::__private::push_caret_eq(&mut $tokens); + }; + + ($tokens:ident :) => { + $crate::__private::push_colon(&mut $tokens); + }; + + ($tokens:ident ::) => { + $crate::__private::push_colon2(&mut $tokens); + }; + + ($tokens:ident ,) => { + $crate::__private::push_comma(&mut $tokens); + }; + + ($tokens:ident /) => { + $crate::__private::push_div(&mut $tokens); + }; + + ($tokens:ident /=) => { + $crate::__private::push_div_eq(&mut $tokens); + }; + + ($tokens:ident .) => { + $crate::__private::push_dot(&mut $tokens); + }; + + ($tokens:ident ..) => { + $crate::__private::push_dot2(&mut $tokens); + }; + + ($tokens:ident ...) 
=> { + $crate::__private::push_dot3(&mut $tokens); + }; + + ($tokens:ident ..=) => { + $crate::__private::push_dot_dot_eq(&mut $tokens); + }; + + ($tokens:ident =) => { + $crate::__private::push_eq(&mut $tokens); + }; + + ($tokens:ident ==) => { + $crate::__private::push_eq_eq(&mut $tokens); + }; + + ($tokens:ident >=) => { + $crate::__private::push_ge(&mut $tokens); + }; + + ($tokens:ident >) => { + $crate::__private::push_gt(&mut $tokens); + }; + + ($tokens:ident <=) => { + $crate::__private::push_le(&mut $tokens); + }; + + ($tokens:ident <) => { + $crate::__private::push_lt(&mut $tokens); + }; + + ($tokens:ident *=) => { + $crate::__private::push_mul_eq(&mut $tokens); + }; + + ($tokens:ident !=) => { + $crate::__private::push_ne(&mut $tokens); + }; + + ($tokens:ident |) => { + $crate::__private::push_or(&mut $tokens); + }; + + ($tokens:ident |=) => { + $crate::__private::push_or_eq(&mut $tokens); + }; + + ($tokens:ident ||) => { + $crate::__private::push_or_or(&mut $tokens); + }; + + ($tokens:ident #) => { + $crate::__private::push_pound(&mut $tokens); + }; + + ($tokens:ident ?) => { + $crate::__private::push_question(&mut $tokens); + }; + + ($tokens:ident ->) => { + $crate::__private::push_rarrow(&mut $tokens); + }; + + ($tokens:ident <-) => { + $crate::__private::push_larrow(&mut $tokens); + }; + + ($tokens:ident %) => { + $crate::__private::push_rem(&mut $tokens); + }; + + ($tokens:ident %=) => { + $crate::__private::push_rem_eq(&mut $tokens); + }; + + ($tokens:ident =>) => { + $crate::__private::push_fat_arrow(&mut $tokens); + }; + + ($tokens:ident ;) => { + $crate::__private::push_semi(&mut $tokens); + }; + + ($tokens:ident <<) => { + $crate::__private::push_shl(&mut $tokens); + }; + + ($tokens:ident <<=) => { + $crate::__private::push_shl_eq(&mut $tokens); + }; + + ($tokens:ident >>) => { + $crate::__private::push_shr(&mut $tokens); + }; + + ($tokens:ident >>=) => { + $crate::__private::push_shr_eq(&mut $tokens); + }; + + ($tokens:ident *) => { + $crate::__private::push_star(&mut $tokens); + }; + + ($tokens:ident -) => { + $crate::__private::push_sub(&mut $tokens); + }; + + ($tokens:ident -=) => { + $crate::__private::push_sub_eq(&mut $tokens); + }; + + ($tokens:ident $ident:ident) => { + $crate::__private::push_ident(&mut $tokens, stringify!($ident)); + }; + + ($tokens:ident $other:tt) => { + $crate::__private::parse(&mut $tokens, stringify!($other)); + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! 
quote_token_spanned { ($tokens:ident $span:ident ( $($inner:tt)* )) => { - $tokens.extend({ - let mut g = $crate::__private::Group::new( - $crate::__private::Delimiter::Parenthesis, - $crate::quote_spanned!($span=> $($inner)*), - ); - g.set_span($span); - Some($crate::__private::TokenTree::from(g)) - }); + $crate::__private::push_group_spanned( + &mut $tokens, + $span, + $crate::__private::Delimiter::Parenthesis, + $crate::quote_spanned!($span=> $($inner)*), + ); }; ($tokens:ident $span:ident [ $($inner:tt)* ]) => { - $tokens.extend({ - let mut g = $crate::__private::Group::new( - $crate::__private::Delimiter::Bracket, - $crate::quote_spanned!($span=> $($inner)*), - ); - g.set_span($span); - Some($crate::__private::TokenTree::from(g)) - }); + $crate::__private::push_group_spanned( + &mut $tokens, + $span, + $crate::__private::Delimiter::Bracket, + $crate::quote_spanned!($span=> $($inner)*), + ); }; ($tokens:ident $span:ident { $($inner:tt)* }) => { - $tokens.extend({ - let mut g = $crate::__private::Group::new( - $crate::__private::Delimiter::Brace, - $crate::quote_spanned!($span=> $($inner)*), - ); - g.set_span($span); - Some($crate::__private::TokenTree::from(g)) - }); + $crate::__private::push_group_spanned( + &mut $tokens, + $span, + $crate::__private::Delimiter::Brace, + $crate::quote_spanned!($span=> $($inner)*), + ); }; ($tokens:ident $span:ident +) => { - $crate::__private::push_add(&mut $tokens, $span); + $crate::__private::push_add_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident +=) => { - $crate::__private::push_add_eq(&mut $tokens, $span); + $crate::__private::push_add_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident &) => { - $crate::__private::push_and(&mut $tokens, $span); + $crate::__private::push_and_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident &&) => { - $crate::__private::push_and_and(&mut $tokens, $span); + $crate::__private::push_and_and_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident &=) => { - $crate::__private::push_and_eq(&mut $tokens, $span); + $crate::__private::push_and_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident @) => { - $crate::__private::push_at(&mut $tokens, $span); + $crate::__private::push_at_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident !) => { - $crate::__private::push_bang(&mut $tokens, $span); + $crate::__private::push_bang_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ^) => { - $crate::__private::push_caret(&mut $tokens, $span); + $crate::__private::push_caret_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ^=) => { - $crate::__private::push_caret_eq(&mut $tokens, $span); + $crate::__private::push_caret_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident :) => { - $crate::__private::push_colon(&mut $tokens, $span); + $crate::__private::push_colon_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ::) => { - $crate::__private::push_colon2(&mut $tokens, $span); + $crate::__private::push_colon2_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ,) => { - $crate::__private::push_comma(&mut $tokens, $span); + $crate::__private::push_comma_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident /) => { - $crate::__private::push_div(&mut $tokens, $span); + $crate::__private::push_div_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident /=) => { - $crate::__private::push_div_eq(&mut $tokens, $span); + $crate::__private::push_div_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident .) 
=> { - $crate::__private::push_dot(&mut $tokens, $span); + $crate::__private::push_dot_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ..) => { - $crate::__private::push_dot2(&mut $tokens, $span); + $crate::__private::push_dot2_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ...) => { - $crate::__private::push_dot3(&mut $tokens, $span); + $crate::__private::push_dot3_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ..=) => { - $crate::__private::push_dot_dot_eq(&mut $tokens, $span); + $crate::__private::push_dot_dot_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident =) => { - $crate::__private::push_eq(&mut $tokens, $span); + $crate::__private::push_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ==) => { - $crate::__private::push_eq_eq(&mut $tokens, $span); + $crate::__private::push_eq_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident >=) => { - $crate::__private::push_ge(&mut $tokens, $span); + $crate::__private::push_ge_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident >) => { - $crate::__private::push_gt(&mut $tokens, $span); + $crate::__private::push_gt_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident <=) => { - $crate::__private::push_le(&mut $tokens, $span); + $crate::__private::push_le_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident <) => { - $crate::__private::push_lt(&mut $tokens, $span); + $crate::__private::push_lt_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident *=) => { - $crate::__private::push_mul_eq(&mut $tokens, $span); + $crate::__private::push_mul_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident !=) => { - $crate::__private::push_ne(&mut $tokens, $span); + $crate::__private::push_ne_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident |) => { - $crate::__private::push_or(&mut $tokens, $span); + $crate::__private::push_or_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident |=) => { - $crate::__private::push_or_eq(&mut $tokens, $span); + $crate::__private::push_or_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ||) => { - $crate::__private::push_or_or(&mut $tokens, $span); + $crate::__private::push_or_or_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident #) => { - $crate::__private::push_pound(&mut $tokens, $span); + $crate::__private::push_pound_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ?) 
=> { - $crate::__private::push_question(&mut $tokens, $span); + $crate::__private::push_question_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ->) => { - $crate::__private::push_rarrow(&mut $tokens, $span); + $crate::__private::push_rarrow_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident <-) => { - $crate::__private::push_larrow(&mut $tokens, $span); + $crate::__private::push_larrow_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident %) => { - $crate::__private::push_rem(&mut $tokens, $span); + $crate::__private::push_rem_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident %=) => { - $crate::__private::push_rem_eq(&mut $tokens, $span); + $crate::__private::push_rem_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident =>) => { - $crate::__private::push_fat_arrow(&mut $tokens, $span); + $crate::__private::push_fat_arrow_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident ;) => { - $crate::__private::push_semi(&mut $tokens, $span); + $crate::__private::push_semi_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident <<) => { - $crate::__private::push_shl(&mut $tokens, $span); + $crate::__private::push_shl_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident <<=) => { - $crate::__private::push_shl_eq(&mut $tokens, $span); + $crate::__private::push_shl_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident >>) => { - $crate::__private::push_shr(&mut $tokens, $span); + $crate::__private::push_shr_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident >>=) => { - $crate::__private::push_shr_eq(&mut $tokens, $span); + $crate::__private::push_shr_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident *) => { - $crate::__private::push_star(&mut $tokens, $span); + $crate::__private::push_star_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident -) => { - $crate::__private::push_sub(&mut $tokens, $span); + $crate::__private::push_sub_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident -=) => { - $crate::__private::push_sub_eq(&mut $tokens, $span); + $crate::__private::push_sub_eq_spanned(&mut $tokens, $span); }; ($tokens:ident $span:ident $ident:ident) => { - $crate::__private::push_ident(&mut $tokens, $span, stringify!($ident)); + $crate::__private::push_ident_spanned(&mut $tokens, $span, stringify!($ident)); }; ($tokens:ident $span:ident $other:tt) => { - $crate::__private::parse(&mut $tokens, $span, stringify!($other)); + $crate::__private::parse_spanned(&mut $tokens, $span, stringify!($other)); }; } diff -Nru cargo-0.44.1/vendor/quote/src/runtime.rs cargo-0.47.0/vendor/quote/src/runtime.rs --- cargo-0.44.1/vendor/quote/src/runtime.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/quote/src/runtime.rs 2020-10-01 21:38:28.000000000 +0000 @@ -180,7 +180,27 @@ } } -pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) { +pub fn push_group(tokens: &mut TokenStream, delimiter: Delimiter, inner: TokenStream) { + tokens.append(Group::new(delimiter, inner)); +} + +pub fn push_group_spanned( + tokens: &mut TokenStream, + span: Span, + delimiter: Delimiter, + inner: TokenStream, +) { + let mut g = Group::new(delimiter, inner); + g.set_span(span); + tokens.append(g); +} + +pub fn parse(tokens: &mut TokenStream, s: &str) { + let s: TokenStream = s.parse().expect("invalid token stream"); + tokens.extend(s); +} + +pub fn parse_spanned(tokens: &mut TokenStream, span: Span, s: &str) { let s: TokenStream = s.parse().expect("invalid token stream"); tokens.extend(s.into_iter().map(|mut t| { 
t.set_span(span); @@ -188,28 +208,47 @@ })); } -pub fn push_ident(tokens: &mut TokenStream, span: Span, s: &str) { +pub fn push_ident(tokens: &mut TokenStream, s: &str) { // Optimization over `mk_ident`, as `s` is guaranteed to be a valid ident. // // FIXME: When `Ident::new_raw` becomes stable, this method should be // updated to call it when available. if s.starts_with("r#") { - parse(tokens, span, s); + parse(tokens, s); + } else { + tokens.append(Ident::new(s, Span::call_site())); + } +} + +pub fn push_ident_spanned(tokens: &mut TokenStream, span: Span, s: &str) { + // Optimization over `mk_ident`, as `s` is guaranteed to be a valid ident. + // + // FIXME: When `Ident::new_raw` becomes stable, this method should be + // updated to call it when available. + if s.starts_with("r#") { + parse_spanned(tokens, span, s); } else { tokens.append(Ident::new(s, span)); } } macro_rules! push_punct { - ($name:ident $char1:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { + ($name:ident $spanned:ident $char1:tt) => { + pub fn $name(tokens: &mut TokenStream) { + tokens.append(Punct::new($char1, Spacing::Alone)); + } + pub fn $spanned(tokens: &mut TokenStream, span: Span) { let mut punct = Punct::new($char1, Spacing::Alone); punct.set_span(span); tokens.append(punct); } }; - ($name:ident $char1:tt $char2:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { + ($name:ident $spanned:ident $char1:tt $char2:tt) => { + pub fn $name(tokens: &mut TokenStream) { + tokens.append(Punct::new($char1, Spacing::Joint)); + tokens.append(Punct::new($char2, Spacing::Alone)); + } + pub fn $spanned(tokens: &mut TokenStream, span: Span) { let mut punct = Punct::new($char1, Spacing::Joint); punct.set_span(span); tokens.append(punct); @@ -218,8 +257,13 @@ tokens.append(punct); } }; - ($name:ident $char1:tt $char2:tt $char3:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { + ($name:ident $spanned:ident $char1:tt $char2:tt $char3:tt) => { + pub fn $name(tokens: &mut TokenStream) { + tokens.append(Punct::new($char1, Spacing::Joint)); + tokens.append(Punct::new($char2, Spacing::Joint)); + tokens.append(Punct::new($char3, Spacing::Alone)); + } + pub fn $spanned(tokens: &mut TokenStream, span: Span) { let mut punct = Punct::new($char1, Spacing::Joint); punct.set_span(span); tokens.append(punct); @@ -233,50 +277,50 @@ }; } -push_punct!(push_add '+'); -push_punct!(push_add_eq '+' '='); -push_punct!(push_and '&'); -push_punct!(push_and_and '&' '&'); -push_punct!(push_and_eq '&' '='); -push_punct!(push_at '@'); -push_punct!(push_bang '!'); -push_punct!(push_caret '^'); -push_punct!(push_caret_eq '^' '='); -push_punct!(push_colon ':'); -push_punct!(push_colon2 ':' ':'); -push_punct!(push_comma ','); -push_punct!(push_div '/'); -push_punct!(push_div_eq '/' '='); -push_punct!(push_dot '.'); -push_punct!(push_dot2 '.' '.'); -push_punct!(push_dot3 '.' '.' '.'); -push_punct!(push_dot_dot_eq '.' '.' '='); -push_punct!(push_eq '='); -push_punct!(push_eq_eq '=' '='); -push_punct!(push_ge '>' '='); -push_punct!(push_gt '>'); -push_punct!(push_le '<' '='); -push_punct!(push_lt '<'); -push_punct!(push_mul_eq '*' '='); -push_punct!(push_ne '!' 
'='); -push_punct!(push_or '|'); -push_punct!(push_or_eq '|' '='); -push_punct!(push_or_or '|' '|'); -push_punct!(push_pound '#'); -push_punct!(push_question '?'); -push_punct!(push_rarrow '-' '>'); -push_punct!(push_larrow '<' '-'); -push_punct!(push_rem '%'); -push_punct!(push_rem_eq '%' '='); -push_punct!(push_fat_arrow '=' '>'); -push_punct!(push_semi ';'); -push_punct!(push_shl '<' '<'); -push_punct!(push_shl_eq '<' '<' '='); -push_punct!(push_shr '>' '>'); -push_punct!(push_shr_eq '>' '>' '='); -push_punct!(push_star '*'); -push_punct!(push_sub '-'); -push_punct!(push_sub_eq '-' '='); +push_punct!(push_add push_add_spanned '+'); +push_punct!(push_add_eq push_add_eq_spanned '+' '='); +push_punct!(push_and push_and_spanned '&'); +push_punct!(push_and_and push_and_and_spanned '&' '&'); +push_punct!(push_and_eq push_and_eq_spanned '&' '='); +push_punct!(push_at push_at_spanned '@'); +push_punct!(push_bang push_bang_spanned '!'); +push_punct!(push_caret push_caret_spanned '^'); +push_punct!(push_caret_eq push_caret_eq_spanned '^' '='); +push_punct!(push_colon push_colon_spanned ':'); +push_punct!(push_colon2 push_colon2_spanned ':' ':'); +push_punct!(push_comma push_comma_spanned ','); +push_punct!(push_div push_div_spanned '/'); +push_punct!(push_div_eq push_div_eq_spanned '/' '='); +push_punct!(push_dot push_dot_spanned '.'); +push_punct!(push_dot2 push_dot2_spanned '.' '.'); +push_punct!(push_dot3 push_dot3_spanned '.' '.' '.'); +push_punct!(push_dot_dot_eq push_dot_dot_eq_spanned '.' '.' '='); +push_punct!(push_eq push_eq_spanned '='); +push_punct!(push_eq_eq push_eq_eq_spanned '=' '='); +push_punct!(push_ge push_ge_spanned '>' '='); +push_punct!(push_gt push_gt_spanned '>'); +push_punct!(push_le push_le_spanned '<' '='); +push_punct!(push_lt push_lt_spanned '<'); +push_punct!(push_mul_eq push_mul_eq_spanned '*' '='); +push_punct!(push_ne push_ne_spanned '!' '='); +push_punct!(push_or push_or_spanned '|'); +push_punct!(push_or_eq push_or_eq_spanned '|' '='); +push_punct!(push_or_or push_or_or_spanned '|' '|'); +push_punct!(push_pound push_pound_spanned '#'); +push_punct!(push_question push_question_spanned '?'); +push_punct!(push_rarrow push_rarrow_spanned '-' '>'); +push_punct!(push_larrow push_larrow_spanned '<' '-'); +push_punct!(push_rem push_rem_spanned '%'); +push_punct!(push_rem_eq push_rem_eq_spanned '%' '='); +push_punct!(push_fat_arrow push_fat_arrow_spanned '=' '>'); +push_punct!(push_semi push_semi_spanned ';'); +push_punct!(push_shl push_shl_spanned '<' '<'); +push_punct!(push_shl_eq push_shl_eq_spanned '<' '<' '='); +push_punct!(push_shr push_shr_spanned '>' '>'); +push_punct!(push_shr_eq push_shr_eq_spanned '>' '>' '='); +push_punct!(push_star push_star_spanned '*'); +push_punct!(push_sub push_sub_spanned '-'); +push_punct!(push_sub_eq push_sub_eq_spanned '-' '='); // Helper method for constructing identifiers from the `format_ident!` macro, // handling `r#` prefixes. diff -Nru cargo-0.44.1/vendor/quote/tests/test.rs cargo-0.47.0/vendor/quote/tests/test.rs --- cargo-0.44.1/vendor/quote/tests/test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/quote/tests/test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -4,7 +4,7 @@ use std::collections::BTreeSet; use proc_macro2::{Ident, Span, TokenStream}; -use quote::{format_ident, quote, TokenStreamExt}; +use quote::{format_ident, quote, quote_spanned, TokenStreamExt}; struct X; @@ -36,6 +36,28 @@ } #[test] +fn test_quote_spanned_impl() { + let span = Span::call_site(); + let tokens = quote_spanned! 
{span=> + impl<'a, T: ToTokens> ToTokens for &'a T { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens) + } + } + }; + + let expected = concat!( + "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", + "fn to_tokens ( & self , tokens : & mut TokenStream ) { ", + "( * * self ) . to_tokens ( tokens ) ", + "} ", + "}" + ); + + assert_eq!(expected, tokens.to_string()); +} + +#[test] fn test_substitution() { let x = X; let tokens = quote!(#x <#x> (#x) [#x] {#x}); @@ -130,7 +152,7 @@ #ii8 #ii16 #ii32 #ii64 #ii128 #iisize #uu8 #uu16 #uu32 #uu64 #uu128 #uusize }; - let expected = "-1i8 -1i16 -1i32 -1i64 -1i128 -1isize 1u8 1u16 1u32 1u64 1u128 1usize"; + let expected = "- 1i8 - 1i16 - 1i32 - 1i64 - 1i128 - 1isize 1u8 1u16 1u32 1u64 1u128 1usize"; assert_eq!(expected, tokens.to_string()); } diff -Nru cargo-0.44.1/vendor/rand/benches/generators.rs cargo-0.47.0/vendor/rand/benches/generators.rs --- cargo-0.44.1/vendor/rand/benches/generators.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/rand/benches/generators.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![feature(test)] -#![allow(non_snake_case)] - -extern crate test; - -const RAND_BENCH_N: u64 = 1000; -const BYTES_LEN: usize = 1024; - -use std::mem::size_of; -use test::{black_box, Bencher}; - -use rand::prelude::*; -use rand::rngs::adapter::ReseedingRng; -use rand::rngs::{mock::StepRng, OsRng}; -use rand_chacha::{ChaCha12Rng, ChaCha20Core, ChaCha20Rng, ChaCha8Rng}; -use rand_hc::Hc128Rng; -use rand_pcg::{Pcg32, Pcg64, Pcg64Mcg}; - -macro_rules! gen_bytes { - ($fnn:ident, $gen:expr) => { - #[bench] - fn $fnn(b: &mut Bencher) { - let mut rng = $gen; - let mut buf = [0u8; BYTES_LEN]; - b.iter(|| { - for _ in 0..RAND_BENCH_N { - rng.fill_bytes(&mut buf); - black_box(buf); - } - }); - b.bytes = BYTES_LEN as u64 * RAND_BENCH_N; - } - }; -} - -gen_bytes!(gen_bytes_step, StepRng::new(0, 1)); -gen_bytes!(gen_bytes_pcg32, Pcg32::from_entropy()); -gen_bytes!(gen_bytes_pcg64, Pcg64::from_entropy()); -gen_bytes!(gen_bytes_pcg64mcg, Pcg64Mcg::from_entropy()); -gen_bytes!(gen_bytes_chacha8, ChaCha8Rng::from_entropy()); -gen_bytes!(gen_bytes_chacha12, ChaCha12Rng::from_entropy()); -gen_bytes!(gen_bytes_chacha20, ChaCha20Rng::from_entropy()); -gen_bytes!(gen_bytes_hc128, Hc128Rng::from_entropy()); -gen_bytes!(gen_bytes_std, StdRng::from_entropy()); -#[cfg(feature = "small_rng")] -gen_bytes!(gen_bytes_small, SmallRng::from_entropy()); -gen_bytes!(gen_bytes_os, OsRng); - -macro_rules! 
gen_uint { - ($fnn:ident, $ty:ty, $gen:expr) => { - #[bench] - fn $fnn(b: &mut Bencher) { - let mut rng = $gen; - b.iter(|| { - let mut accum: $ty = 0; - for _ in 0..RAND_BENCH_N { - accum = accum.wrapping_add(rng.gen::<$ty>()); - } - accum - }); - b.bytes = size_of::<$ty>() as u64 * RAND_BENCH_N; - } - }; -} - -gen_uint!(gen_u32_step, u32, StepRng::new(0, 1)); -gen_uint!(gen_u32_pcg32, u32, Pcg32::from_entropy()); -gen_uint!(gen_u32_pcg64, u32, Pcg64::from_entropy()); -gen_uint!(gen_u32_pcg64mcg, u32, Pcg64Mcg::from_entropy()); -gen_uint!(gen_u32_chacha8, u32, ChaCha8Rng::from_entropy()); -gen_uint!(gen_u32_chacha12, u32, ChaCha12Rng::from_entropy()); -gen_uint!(gen_u32_chacha20, u32, ChaCha20Rng::from_entropy()); -gen_uint!(gen_u32_hc128, u32, Hc128Rng::from_entropy()); -gen_uint!(gen_u32_std, u32, StdRng::from_entropy()); -#[cfg(feature = "small_rng")] -gen_uint!(gen_u32_small, u32, SmallRng::from_entropy()); -gen_uint!(gen_u32_os, u32, OsRng); - -gen_uint!(gen_u64_step, u64, StepRng::new(0, 1)); -gen_uint!(gen_u64_pcg32, u64, Pcg32::from_entropy()); -gen_uint!(gen_u64_pcg64, u64, Pcg64::from_entropy()); -gen_uint!(gen_u64_pcg64mcg, u64, Pcg64Mcg::from_entropy()); -gen_uint!(gen_u64_chacha8, u64, ChaCha8Rng::from_entropy()); -gen_uint!(gen_u64_chacha12, u64, ChaCha12Rng::from_entropy()); -gen_uint!(gen_u64_chacha20, u64, ChaCha20Rng::from_entropy()); -gen_uint!(gen_u64_hc128, u64, Hc128Rng::from_entropy()); -gen_uint!(gen_u64_std, u64, StdRng::from_entropy()); -#[cfg(feature = "small_rng")] -gen_uint!(gen_u64_small, u64, SmallRng::from_entropy()); -gen_uint!(gen_u64_os, u64, OsRng); - -macro_rules! init_gen { - ($fnn:ident, $gen:ident) => { - #[bench] - fn $fnn(b: &mut Bencher) { - let mut rng = Pcg32::from_entropy(); - b.iter(|| { - let r2 = $gen::from_rng(&mut rng).unwrap(); - r2 - }); - } - }; -} - -init_gen!(init_pcg32, Pcg32); -init_gen!(init_pcg64, Pcg64); -init_gen!(init_pcg64mcg, Pcg64Mcg); -init_gen!(init_hc128, Hc128Rng); -init_gen!(init_chacha, ChaCha20Rng); - -const RESEEDING_BYTES_LEN: usize = 1024 * 1024; -const RESEEDING_BENCH_N: u64 = 16; - -macro_rules! reseeding_bytes { - ($fnn:ident, $thresh:expr) => { - #[bench] - fn $fnn(b: &mut Bencher) { - let mut rng = ReseedingRng::new(ChaCha20Core::from_entropy(), $thresh * 1024, OsRng); - let mut buf = [0u8; RESEEDING_BYTES_LEN]; - b.iter(|| { - for _ in 0..RESEEDING_BENCH_N { - rng.fill_bytes(&mut buf); - black_box(&buf); - } - }); - b.bytes = RESEEDING_BYTES_LEN as u64 * RESEEDING_BENCH_N; - } - }; -} - -reseeding_bytes!(reseeding_chacha20_4k, 4); -reseeding_bytes!(reseeding_chacha20_16k, 16); -reseeding_bytes!(reseeding_chacha20_32k, 32); -reseeding_bytes!(reseeding_chacha20_64k, 64); -reseeding_bytes!(reseeding_chacha20_256k, 256); -reseeding_bytes!(reseeding_chacha20_1M, 1024); - - -macro_rules! threadrng_uint { - ($fnn:ident, $ty:ty) => { - #[bench] - fn $fnn(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { - let mut accum: $ty = 0; - for _ in 0..RAND_BENCH_N { - accum = accum.wrapping_add(rng.gen::<$ty>()); - } - accum - }); - b.bytes = size_of::<$ty>() as u64 * RAND_BENCH_N; - } - }; -} - -threadrng_uint!(thread_rng_u32, u32); -threadrng_uint!(thread_rng_u64, u64); diff -Nru cargo-0.44.1/vendor/rand/benches/misc.rs cargo-0.47.0/vendor/rand/benches/misc.rs --- cargo-0.44.1/vendor/rand/benches/misc.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/rand/benches/misc.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -// Copyright 2018 Developers of the Rand project. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![feature(test)] - -extern crate test; - -const RAND_BENCH_N: u64 = 1000; - -use test::Bencher; - -use rand::distributions::{Bernoulli, Distribution, Standard}; -use rand::prelude::*; -use rand_pcg::{Pcg32, Pcg64Mcg}; - -#[bench] -fn misc_gen_bool_const(b: &mut Bencher) { - let mut rng = Pcg32::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let mut accum = true; - for _ in 0..crate::RAND_BENCH_N { - accum ^= rng.gen_bool(0.18); - } - accum - }) -} - -#[bench] -fn misc_gen_bool_var(b: &mut Bencher) { - let mut rng = Pcg32::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let mut accum = true; - let mut p = 0.18; - for _ in 0..crate::RAND_BENCH_N { - accum ^= rng.gen_bool(p); - p += 0.0001; - } - accum - }) -} - -#[bench] -fn misc_gen_ratio_const(b: &mut Bencher) { - let mut rng = Pcg32::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let mut accum = true; - for _ in 0..crate::RAND_BENCH_N { - accum ^= rng.gen_ratio(2, 3); - } - accum - }) -} - -#[bench] -fn misc_gen_ratio_var(b: &mut Bencher) { - let mut rng = Pcg32::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let mut accum = true; - for i in 2..(crate::RAND_BENCH_N as u32 + 2) { - accum ^= rng.gen_ratio(i, i + 1); - } - accum - }) -} - -#[bench] -fn misc_bernoulli_const(b: &mut Bencher) { - let mut rng = Pcg32::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let d = rand::distributions::Bernoulli::new(0.18).unwrap(); - let mut accum = true; - for _ in 0..crate::RAND_BENCH_N { - accum ^= rng.sample(d); - } - accum - }) -} - -#[bench] -fn misc_bernoulli_var(b: &mut Bencher) { - let mut rng = Pcg32::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let mut accum = true; - let mut p = 0.18; - for _ in 0..crate::RAND_BENCH_N { - let d = Bernoulli::new(p).unwrap(); - accum ^= rng.sample(d); - p += 0.0001; - } - accum - }) -} - -#[bench] -fn gen_1k_iter_repeat(b: &mut Bencher) { - use std::iter; - let mut rng = Pcg64Mcg::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let v: Vec = iter::repeat(()).map(|()| rng.gen()).take(128).collect(); - v - }); - b.bytes = 1024; -} - -#[bench] -fn gen_1k_sample_iter(b: &mut Bencher) { - let mut rng = Pcg64Mcg::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - let v: Vec = Standard.sample_iter(&mut rng).take(128).collect(); - v - }); - b.bytes = 1024; -} - -#[bench] -fn gen_1k_gen_array(b: &mut Bencher) { - let mut rng = Pcg64Mcg::from_rng(&mut thread_rng()).unwrap(); - b.iter(|| { - // max supported array length is 32! - let v: [[u64; 32]; 4] = rng.gen(); - v - }); - b.bytes = 1024; -} - -#[bench] -fn gen_1k_fill(b: &mut Bencher) { - let mut rng = Pcg64Mcg::from_rng(&mut thread_rng()).unwrap(); - let mut buf = [0u64; 128]; - b.iter(|| { - rng.fill(&mut buf[..]); - buf - }); - b.bytes = 1024; -} diff -Nru cargo-0.44.1/vendor/rand/benches/seq.rs cargo-0.47.0/vendor/rand/benches/seq.rs --- cargo-0.44.1/vendor/rand/benches/seq.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/rand/benches/seq.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,179 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
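The rand benchmark files being removed above all follow one pattern: a macro such as `gen_bytes!`/`gen_uint!` expands to a nightly-only `#[bench]` function that repeatedly samples an RNG, accumulates the output, and credits `b.bytes` so `cargo bench` reports throughput. For readers without a nightly toolchain, here is a rough stable-Rust sketch of the same measurement idea; the `rand::thread_rng`/`Rng::gen` calls are rand's public API, while the loop count, names, and the `println!` reporting are illustrative only, not part of the deleted benches.

```rust
// Stable-Rust sketch of the deleted gen_uint!-style throughput benchmark,
// using std::time::Instant instead of the nightly-only test::Bencher.
use rand::Rng;
use std::time::Instant;

fn main() {
    const N: u64 = 1000; // mirrors RAND_BENCH_N in the removed benches
    let mut rng = rand::thread_rng();

    let start = Instant::now();
    let mut accum: u64 = 0;
    for _ in 0..N {
        // wrapping_add keeps the work observable without risking an
        // overflow panic in debug builds.
        accum = accum.wrapping_add(rng.gen::<u64>());
    }
    let elapsed = start.elapsed();

    println!(
        "generated {} u64s ({} bytes) in {:?}, accum = {}",
        N,
        N * std::mem::size_of::<u64>() as u64,
        elapsed,
        accum
    );
}
```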
- -#![feature(test)] -#![allow(non_snake_case)] - -extern crate test; - -use test::Bencher; - -use rand::prelude::*; -use rand::seq::*; -use std::mem::size_of; - -// We force use of 32-bit RNG since seq code is optimised for use with 32-bit -// generators on all platforms. -use rand_pcg::Pcg32 as SmallRng; - -const RAND_BENCH_N: u64 = 1000; - -#[bench] -fn seq_shuffle_100(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &mut [usize] = &mut [1; 100]; - b.iter(|| { - x.shuffle(&mut rng); - x[0] - }) -} - -#[bench] -fn seq_slice_choose_1_of_1000(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &mut [usize] = &mut [1; 1000]; - for i in 0..1000 { - x[i] = i; - } - b.iter(|| { - let mut s = 0; - for _ in 0..RAND_BENCH_N { - s += x.choose(&mut rng).unwrap(); - } - s - }); - b.bytes = size_of::() as u64 * crate::RAND_BENCH_N; -} - -macro_rules! seq_slice_choose_multiple { - ($name:ident, $amount:expr, $length:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &[i32] = &[$amount; $length]; - let mut result = [0i32; $amount]; - b.iter(|| { - // Collect full result to prevent unwanted shortcuts getting - // first element (in case sample_indices returns an iterator). - for (slot, sample) in result.iter_mut().zip(x.choose_multiple(&mut rng, $amount)) { - *slot = *sample; - } - result[$amount - 1] - }) - } - }; -} - -seq_slice_choose_multiple!(seq_slice_choose_multiple_1_of_1000, 1, 1000); -seq_slice_choose_multiple!(seq_slice_choose_multiple_950_of_1000, 950, 1000); -seq_slice_choose_multiple!(seq_slice_choose_multiple_10_of_100, 10, 100); -seq_slice_choose_multiple!(seq_slice_choose_multiple_90_of_100, 90, 100); - -#[bench] -fn seq_iter_choose_from_1000(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &mut [usize] = &mut [1; 1000]; - for i in 0..1000 { - x[i] = i; - } - b.iter(|| { - let mut s = 0; - for _ in 0..RAND_BENCH_N { - s += x.iter().choose(&mut rng).unwrap(); - } - s - }); - b.bytes = size_of::() as u64 * crate::RAND_BENCH_N; -} - -#[derive(Clone)] -struct UnhintedIterator { - iter: I, -} -impl Iterator for UnhintedIterator { - type Item = I::Item; - - fn next(&mut self) -> Option { - self.iter.next() - } -} - -#[derive(Clone)] -struct WindowHintedIterator { - iter: I, - window_size: usize, -} -impl Iterator for WindowHintedIterator { - type Item = I::Item; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - (std::cmp::min(self.iter.len(), self.window_size), None) - } -} - -#[bench] -fn seq_iter_unhinted_choose_from_1000(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &[usize] = &[1; 1000]; - b.iter(|| { - UnhintedIterator { iter: x.iter() } - .choose(&mut rng) - .unwrap() - }) -} - -#[bench] -fn seq_iter_window_hinted_choose_from_1000(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &[usize] = &[1; 1000]; - b.iter(|| { - WindowHintedIterator { - iter: x.iter(), - window_size: 7, - } - .choose(&mut rng) - }) -} - -#[bench] -fn seq_iter_choose_multiple_10_of_100(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &[usize] = &[1; 100]; - b.iter(|| x.iter().cloned().choose_multiple(&mut rng, 10)) -} - -#[bench] -fn seq_iter_choose_multiple_fill_10_of_100(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - let x: &[usize] = 
&[1; 100]; - let mut buf = [0; 10]; - b.iter(|| x.iter().cloned().choose_multiple_fill(&mut rng, &mut buf)) -} - -macro_rules! sample_indices { - ($name:ident, $fn:ident, $amount:expr, $length:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut rng = SmallRng::from_rng(thread_rng()).unwrap(); - b.iter(|| index::$fn(&mut rng, $length, $amount)) - } - }; -} - -sample_indices!(misc_sample_indices_1_of_1k, sample, 1, 1000); -sample_indices!(misc_sample_indices_10_of_1k, sample, 10, 1000); -sample_indices!(misc_sample_indices_100_of_1k, sample, 100, 1000); -sample_indices!(misc_sample_indices_100_of_1M, sample, 100, 1000_000); -sample_indices!(misc_sample_indices_100_of_1G, sample, 100, 1000_000_000); -sample_indices!(misc_sample_indices_200_of_1G, sample, 200, 1000_000_000); -sample_indices!(misc_sample_indices_400_of_1G, sample, 400, 1000_000_000); -sample_indices!(misc_sample_indices_600_of_1G, sample, 600, 1000_000_000); diff -Nru cargo-0.44.1/vendor/rand/benches/weighted.rs cargo-0.47.0/vendor/rand/benches/weighted.rs --- cargo-0.44.1/vendor/rand/benches/weighted.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/rand/benches/weighted.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -// Copyright 2019 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![feature(test)] - -extern crate test; - -use rand::distributions::WeightedIndex; -use rand::Rng; -use test::Bencher; - -#[bench] -fn weighted_index_creation(b: &mut Bencher) { - let mut rng = rand::thread_rng(); - let weights = [1u32, 2, 4, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 7]; - b.iter(|| { - let distr = WeightedIndex::new(weights.to_vec()).unwrap(); - rng.sample(distr) - }) -} - -#[bench] -fn weighted_index_modification(b: &mut Bencher) { - let mut rng = rand::thread_rng(); - let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; - let mut distr = WeightedIndex::new(weights.to_vec()).unwrap(); - b.iter(|| { - distr.update_weights(&[(2, &4), (5, &1)]).unwrap(); - rng.sample(&distr) - }) -} diff -Nru cargo-0.44.1/vendor/redox_syscall/.cargo-checksum.json cargo-0.47.0/vendor/redox_syscall/.cargo-checksum.json --- cargo-0.44.1/vendor/redox_syscall/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"} \ No newline at end of file +{"files":{},"package":"41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/redox_syscall/Cargo.toml cargo-0.47.0/vendor/redox_syscall/Cargo.toml --- cargo-0.44.1/vendor/redox_syscall/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" authors = ["Jeremy Soller "] description = "A Rust library to access raw Redox system calls" documentation = "https://docs.rs/redox_syscall" diff -Nru cargo-0.44.1/vendor/redox_syscall/debian/patches/no-nightly.patch cargo-0.47.0/vendor/redox_syscall/debian/patches/no-nightly.patch --- cargo-0.44.1/vendor/redox_syscall/debian/patches/no-nightly.patch 2020-05-27 21:15:58.000000000 +0000 +++ 
cargo-0.47.0/vendor/redox_syscall/debian/patches/no-nightly.patch 2020-10-01 21:38:28.000000000 +0000 @@ -18,9 +18,9 @@ --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ --#![feature(asm)] +-#![feature(llvm_asm)] -#![feature(const_fn)] -+#![cfg_attr(nightly, feature(asm))] ++#![cfg_attr(nightly, feature(llvm_asm))] +#![cfg_attr(nightly, feature(const_fn))] #![cfg_attr(not(test), no_std)] diff -Nru cargo-0.44.1/vendor/redox_syscall/.pc/no-nightly.patch/src/lib.rs cargo-0.47.0/vendor/redox_syscall/.pc/no-nightly.patch/src/lib.rs --- cargo-0.44.1/vendor/redox_syscall/.pc/no-nightly.patch/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/.pc/no-nightly.patch/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -#![feature(asm)] +#![feature(llvm_asm)] #![feature(const_fn)] #![cfg_attr(not(test), no_std)] diff -Nru cargo-0.44.1/vendor/redox_syscall/src/arch/aarch64.rs cargo-0.47.0/vendor/redox_syscall/src/arch/aarch64.rs --- cargo-0.44.1/vendor/redox_syscall/src/arch/aarch64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/src/arch/aarch64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ use super::error::{Error, Result}; pub unsafe fn syscall0(mut a: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a) : "x0", "x8" @@ -11,7 +11,7 @@ } pub unsafe fn syscall1(mut a: usize, b: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a), "{x0}"(b) : "x0", "x8" @@ -22,7 +22,7 @@ // Clobbers all registers - special for clone pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a), "{x0}"(b) : "memory", @@ -36,7 +36,7 @@ } pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a), "{x0}"(b), "{x1}"(c) : "x0", "x1", "x8" @@ -46,7 +46,7 @@ } pub unsafe fn syscall3(mut a: usize, b: usize, c: usize, d: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a), "{x0}"(b), "{x1}"(c), "{x2}"(d) : "x0", "x1", "x2", "x8" @@ -56,7 +56,7 @@ } pub unsafe fn syscall4(mut a: usize, b: usize, c: usize, d: usize, e: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a), "{x0}"(b), "{x1}"(c), "{x2}"(d), "{x3}"(e) : "x0", "x1", "x2", "x3", "x8" @@ -67,7 +67,7 @@ pub unsafe fn syscall5(mut a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> Result { - asm!("svc 0" + llvm_asm!("svc 0" : "={x0}"(a) : "{x8}"(a), "{x0}"(b), "{x1}"(c), "{x2}"(d), "{x3}"(e), "{x4}"(f) : "x0", "x1", "x2", "x3", "x4", "x8" diff -Nru cargo-0.44.1/vendor/redox_syscall/src/arch/arm.rs cargo-0.47.0/vendor/redox_syscall/src/arch/arm.rs --- cargo-0.44.1/vendor/redox_syscall/src/arch/arm.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/src/arch/arm.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ use super::error::{Error, Result}; pub unsafe fn syscall0(mut a: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a) : "memory" @@ -11,7 +11,7 @@ } pub unsafe fn syscall1(mut a: usize, b: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a), "{r0}"(b) : "memory" @@ -22,7 +22,7 @@ // Clobbers all registers - special for clone pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a), "{r0}"(b) : "memory", "r0", "r1", "r2", "r3", "r4" @@ -32,7 +32,7 @@ } pub unsafe fn 
syscall2(mut a: usize, b: usize, c: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a), "{r0}"(b), "{r1}"(c) : "memory" @@ -42,7 +42,7 @@ } pub unsafe fn syscall3(mut a: usize, b: usize, c: usize, d: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a), "{r0}"(b), "{r1}"(c), "{r2}"(d) : "memory" @@ -52,7 +52,7 @@ } pub unsafe fn syscall4(mut a: usize, b: usize, c: usize, d: usize, e: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a), "{r0}"(b), "{r1}"(c), "{r2}"(d), "{r3}"(e) : "memory" @@ -63,7 +63,7 @@ pub unsafe fn syscall5(mut a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> Result { - asm!("swi $$0" + llvm_asm!("swi $$0" : "={r0}"(a) : "{r7}"(a), "{r0}"(b), "{r1}"(c), "{r2}"(d), "{r3}"(e), "{r4}"(f) : "memory" diff -Nru cargo-0.44.1/vendor/redox_syscall/src/arch/x86_64.rs cargo-0.47.0/vendor/redox_syscall/src/arch/x86_64.rs --- cargo-0.44.1/vendor/redox_syscall/src/arch/x86_64.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/src/arch/x86_64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ use super::error::{Error, Result}; pub unsafe fn syscall0(mut a: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a) : "rcx", "r11", "memory" @@ -11,7 +11,7 @@ } pub unsafe fn syscall1(mut a: usize, b: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a), "{rdi}"(b) : "rcx", "r11", "memory" @@ -22,7 +22,7 @@ // Clobbers all registers - special for clone pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a), "{rdi}"(b) : "memory", "rbx", "rcx", "rdx", "rsi", "rdi", "r8", @@ -33,7 +33,7 @@ } pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a), "{rdi}"(b), "{rsi}"(c) : "rcx", "r11", "memory" @@ -43,7 +43,7 @@ } pub unsafe fn syscall3(mut a: usize, b: usize, c: usize, d: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a), "{rdi}"(b), "{rsi}"(c), "{rdx}"(d) : "rcx", "r11", "memory" @@ -53,7 +53,7 @@ } pub unsafe fn syscall4(mut a: usize, b: usize, c: usize, d: usize, e: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a), "{rdi}"(b), "{rsi}"(c), "{rdx}"(d), "{r10}"(e) : "rcx", "r11", "memory" @@ -64,7 +64,7 @@ pub unsafe fn syscall5(mut a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> Result { - asm!("syscall" + llvm_asm!("syscall" : "={rax}"(a) : "{rax}"(a), "{rdi}"(b), "{rsi}"(c), "{rdx}"(d), "{r10}"(e), "{r8}"(f) : "rcx", "r11", "memory" diff -Nru cargo-0.44.1/vendor/redox_syscall/src/arch/x86.rs cargo-0.47.0/vendor/redox_syscall/src/arch/x86.rs --- cargo-0.44.1/vendor/redox_syscall/src/arch/x86.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/src/arch/x86.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ use super::error::{Error, Result}; pub unsafe fn syscall0(mut a: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a) : "memory" @@ -11,7 +11,7 @@ } pub unsafe fn syscall1(mut a: usize, b: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a), "{ebx}"(b) : "memory" @@ -22,7 +22,7 @@ // Clobbers all registers - special for clone pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a), 
"{ebx}"(b) : "memory", "ebx", "ecx", "edx", "esi", "edi" @@ -32,7 +32,7 @@ } pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a), "{ebx}"(b), "{ecx}"(c) : "memory" @@ -42,7 +42,7 @@ } pub unsafe fn syscall3(mut a: usize, b: usize, c: usize, d: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a), "{ebx}"(b), "{ecx}"(c), "{edx}"(d) : "memory" @@ -52,7 +52,7 @@ } pub unsafe fn syscall4(mut a: usize, b: usize, c: usize, d: usize, e: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a), "{ebx}"(b), "{ecx}"(c), "{edx}"(d), "{esi}"(e) : "memory" @@ -63,7 +63,7 @@ pub unsafe fn syscall5(mut a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> Result { - asm!("int 0x80" + llvm_asm!("int 0x80" : "={eax}"(a) : "{eax}"(a), "{ebx}"(b), "{ecx}"(c), "{edx}"(d), "{esi}"(e), "{edi}"(f) : "memory" diff -Nru cargo-0.44.1/vendor/redox_syscall/src/io/pio.rs cargo-0.47.0/vendor/redox_syscall/src/io/pio.rs --- cargo-0.44.1/vendor/redox_syscall/src/io/pio.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/src/io/pio.rs 2020-10-01 21:38:28.000000000 +0000 @@ -28,7 +28,7 @@ fn read(&self) -> u8 { let value: u8; unsafe { - asm!("in $0, $1" : "={al}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); + llvm_asm!("in $0, $1" : "={al}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); } value } @@ -37,7 +37,7 @@ #[inline(always)] fn write(&mut self, value: u8) { unsafe { - asm!("out $1, $0" : : "{al}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); + llvm_asm!("out $1, $0" : : "{al}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); } } } @@ -51,7 +51,7 @@ fn read(&self) -> u16 { let value: u16; unsafe { - asm!("in $0, $1" : "={ax}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); + llvm_asm!("in $0, $1" : "={ax}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); } value } @@ -60,7 +60,7 @@ #[inline(always)] fn write(&mut self, value: u16) { unsafe { - asm!("out $1, $0" : : "{ax}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); + llvm_asm!("out $1, $0" : : "{ax}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); } } } @@ -74,7 +74,7 @@ fn read(&self) -> u32 { let value: u32; unsafe { - asm!("in $0, $1" : "={eax}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); + llvm_asm!("in $0, $1" : "={eax}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); } value } @@ -83,7 +83,7 @@ #[inline(always)] fn write(&mut self, value: u32) { unsafe { - asm!("out $1, $0" : : "{eax}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); + llvm_asm!("out $1, $0" : : "{eax}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); } } } diff -Nru cargo-0.44.1/vendor/redox_syscall/src/lib.rs cargo-0.47.0/vendor/redox_syscall/src/lib.rs --- cargo-0.44.1/vendor/redox_syscall/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/redox_syscall/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -#![cfg_attr(nightly, feature(asm))] +#![cfg_attr(nightly, feature(llvm_asm))] #![cfg_attr(nightly, feature(const_fn))] #![cfg_attr(not(test), no_std)] diff -Nru cargo-0.44.1/vendor/regex/.cargo-checksum.json cargo-0.47.0/vendor/regex/.cargo-checksum.json --- cargo-0.44.1/vendor/regex/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/.cargo-checksum.json 2020-10-01 
21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692"} \ No newline at end of file +{"files":{},"package":"9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/regex/Cargo.lock cargo-0.47.0/vendor/regex/Cargo.lock --- cargo-0.44.1/vendor/regex/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "aho-corasick" -version = "0.7.7" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f56c476256dc249def911d6f7580b5fc7e875895b5d7ee88f5d602208035744" +checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" dependencies = [ "memchr", ] @@ -31,12 +31,6 @@ ] [[package]] -name = "doc-comment" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" - -[[package]] name = "fuchsia-cprng" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -50,15 +44,15 @@ [[package]] name = "libc" -version = "0.2.66" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" [[package]] name = "memchr" -version = "2.3.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "quickcheck" @@ -187,10 +181,9 @@ [[package]] name = "regex" -version = "1.3.7" +version = "1.3.9" dependencies = [ "aho-corasick", - "doc-comment", "lazy_static", "memchr", "quickcheck", @@ -201,9 +194,9 @@ [[package]] name = "regex-syntax" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" [[package]] name = "thread_local" diff -Nru cargo-0.44.1/vendor/regex/Cargo.toml cargo-0.47.0/vendor/regex/Cargo.toml --- cargo-0.44.1/vendor/regex/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "regex" -version = "1.3.7" +version = "1.3.9" authors = ["The Rust Project Developers"] exclude = ["/scripts/*", "/.github/*"] autotests = false @@ -80,15 +80,12 @@ optional = true [dependencies.regex-syntax] -version = "0.6.17" +version = "0.6.18" default-features = false [dependencies.thread_local] version = "1" optional = true -[dev-dependencies.doc-comment] -version = "0.3" - [dev-dependencies.lazy_static] version = "1" diff -Nru cargo-0.44.1/vendor/regex/CHANGELOG.md cargo-0.47.0/vendor/regex/CHANGELOG.md --- cargo-0.44.1/vendor/regex/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,33 @@ +1.3.9 (2020-05-28) +================== +This release fixes a MSRV (Minimum Support Rust Version) regression in the +1.3.8 release. 
Namely, while 1.3.8 compiles on Rust 1.28, it actually does not +compile on other Rust versions, such as Rust 1.39. + +Bug fixes: + +* [BUG #685](https://github.com/rust-lang/regex/issue/685): + Remove use of `doc_comment` crate, which cannot be used before Rust 1.43. + + +1.3.8 (2020-05-28) +================== +This release contains a couple of important bug fixes driven +by better support for empty-subexpressions in regexes. For +example, regexes like `b|` are now allowed. Major thanks to +[@sliquister](https://github.com/sliquister) for implementing support for this +in [#677](https://github.com/rust-lang/regex/pull/677). + +Bug fixes: + +* [BUG #523](https://github.com/rust-lang/regex/pull/523): + Add note to documentation that spaces can be escaped in `x` mode. +* [BUG #524](https://github.com/rust-lang/regex/issue/524): + Add support for empty sub-expressions, including empty alternations. +* [BUG #659](https://github.com/rust-lang/regex/issue/659): + Fix match bug caused by an empty sub-expression miscompilation. + + 1.3.7 (2020-04-17) ================== This release contains a small bug fix that fixes how `regex` forwards crate diff -Nru cargo-0.44.1/vendor/regex/.pc/relax-test-dep.diff/Cargo.toml cargo-0.47.0/vendor/regex/.pc/relax-test-dep.diff/Cargo.toml --- cargo-0.44.1/vendor/regex/.pc/relax-test-dep.diff/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/.pc/relax-test-dep.diff/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "regex" -version = "1.3.7" +version = "1.3.9" authors = ["The Rust Project Developers"] exclude = ["/scripts/*", "/.github/*"] autotests = false @@ -80,15 +80,12 @@ optional = true [dependencies.regex-syntax] -version = "0.6.17" +version = "0.6.18" default-features = false [dependencies.thread_local] version = "1" optional = true -[dev-dependencies.doc-comment] -version = "0.3" - [dev-dependencies.lazy_static] version = "1" diff -Nru cargo-0.44.1/vendor/regex/src/compile.rs cargo-0.47.0/vendor/regex/src/compile.rs --- cargo-0.44.1/vendor/regex/src/compile.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/src/compile.rs 2020-10-01 21:38:28.000000000 +0000 @@ -15,6 +15,7 @@ use Error; type Result = result::Result; +type ResultOrEmpty = result::Result, Error>; #[derive(Debug)] struct Patch { @@ -132,7 +133,7 @@ self.compiled.start = dotstar_patch.entry; } self.compiled.captures = vec![None]; - let patch = self.c_capture(0, expr)?; + let patch = self.c_capture(0, expr)?.unwrap_or(self.next_inst()); if self.compiled.needs_dotstar() { self.fill(dotstar_patch.hole, patch.entry); } else { @@ -167,14 +168,16 @@ for (i, expr) in exprs[0..exprs.len() - 1].iter().enumerate() { self.fill_to_next(prev_hole); let split = self.push_split_hole(); - let Patch { hole, entry } = self.c_capture(0, expr)?; + let Patch { hole, entry } = + self.c_capture(0, expr)?.unwrap_or(self.next_inst()); self.fill_to_next(hole); self.compiled.matches.push(self.insts.len()); self.push_compiled(Inst::Match(i)); prev_hole = self.fill_split(split, Some(entry), None); } let i = exprs.len() - 1; - let Patch { hole, entry } = self.c_capture(0, &exprs[i])?; + let Patch { hole, entry } = + self.c_capture(0, &exprs[i])?.unwrap_or(self.next_inst()); self.fill(prev_hole, entry); self.fill_to_next(hole); self.compiled.matches.push(self.insts.len()); @@ -242,13 +245,16 @@ /// method you will see that it does exactly this, though it handles /// a list of expressions rather than just the two that we use for /// an example. 
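The 1.3.8 changelog entry above and the `compile.rs` rework that continues below are two sides of the same change: alternations may now contain empty branches (patterns such as `b|`, `|`, or `(?:|)`), which earlier releases rejected with the "alternations cannot currently contain empty sub-expressions" error seen in the removed code. A small usage sketch against the public `regex` API; the expected spans are taken directly from the `match_empty13` test added to `tests/crazy.rs` further down, everything else is illustrative.

```rust
use regex::Regex;

fn main() {
    // Accepted as of regex 1.3.8/1.3.9; older versions returned a
    // syntax error for the empty right-hand branch.
    let re = Regex::new("b|").unwrap();

    // On "abc" the pattern matches an empty string at 0, then "b" at
    // 1..2, then an empty string at 3, matching the match_empty13 test.
    let spans: Vec<(usize, usize)> = re
        .find_iter("abc")
        .map(|m| (m.start(), m.end()))
        .collect();
    assert_eq!(spans, vec![(0, 0), (1, 2), (3, 3)]);
    println!("{:?}", spans);
}
```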
- fn c(&mut self, expr: &Hir) -> Result { + /// + /// Ok(None) is returned when an expression is compiled to no + /// instruction, and so no patch.entry value makes sense. + fn c(&mut self, expr: &Hir) -> ResultOrEmpty { use prog; use syntax::hir::HirKind::*; self.check_size()?; match *expr.kind() { - Empty => Ok(Patch { hole: Hole::None, entry: self.insts.len() }), + Empty => Ok(None), Literal(hir::Literal::Unicode(c)) => self.c_char(c), Literal(hir::Literal::Byte(b)) => { assert!(self.compiled.uses_bytes()); @@ -357,7 +363,7 @@ } } - fn c_capture(&mut self, first_slot: usize, expr: &Hir) -> Result { + fn c_capture(&mut self, first_slot: usize, expr: &Hir) -> ResultOrEmpty { if self.num_exprs > 1 || self.compiled.is_dfa { // Don't ever compile Save instructions for regex sets because // they are never used. They are also never used in DFA programs @@ -366,11 +372,11 @@ } else { let entry = self.insts.len(); let hole = self.push_hole(InstHole::Save { slot: first_slot }); - let patch = self.c(expr)?; + let patch = self.c(expr)?.unwrap_or(self.next_inst()); self.fill(hole, patch.entry); self.fill_to_next(patch.hole); let hole = self.push_hole(InstHole::Save { slot: first_slot + 1 }); - Ok(Patch { hole: hole, entry: entry }) + Ok(Some(Patch { hole: hole, entry: entry })) } } @@ -381,36 +387,38 @@ greedy: false, hir: Box::new(Hir::any(true)), }))? + .unwrap() } else { self.c(&Hir::repetition(hir::Repetition { kind: hir::RepetitionKind::ZeroOrMore, greedy: false, hir: Box::new(Hir::any(false)), }))? + .unwrap() }) } - fn c_char(&mut self, c: char) -> Result { + fn c_char(&mut self, c: char) -> ResultOrEmpty { if self.compiled.uses_bytes() { if c.is_ascii() { let b = c as u8; let hole = self.push_hole(InstHole::Bytes { start: b, end: b }); self.byte_classes.set_range(b, b); - Ok(Patch { hole, entry: self.insts.len() - 1 }) + Ok(Some(Patch { hole, entry: self.insts.len() - 1 })) } else { self.c_class(&[hir::ClassUnicodeRange::new(c, c)]) } } else { let hole = self.push_hole(InstHole::Char { c: c }); - Ok(Patch { hole, entry: self.insts.len() - 1 }) + Ok(Some(Patch { hole, entry: self.insts.len() - 1 })) } } - fn c_class(&mut self, ranges: &[hir::ClassUnicodeRange]) -> Result { + fn c_class(&mut self, ranges: &[hir::ClassUnicodeRange]) -> ResultOrEmpty { assert!(!ranges.is_empty()); if self.compiled.uses_bytes() { - CompileClass { c: self, ranges: ranges }.compile() + Ok(Some(CompileClass { c: self, ranges: ranges }.compile()?)) } else { let ranges: Vec<(char, char)> = ranges.iter().map(|r| (r.start(), r.end())).collect(); @@ -419,15 +427,18 @@ } else { self.push_hole(InstHole::Ranges { ranges: ranges }) }; - Ok(Patch { hole: hole, entry: self.insts.len() - 1 }) + Ok(Some(Patch { hole: hole, entry: self.insts.len() - 1 })) } } - fn c_byte(&mut self, b: u8) -> Result { + fn c_byte(&mut self, b: u8) -> ResultOrEmpty { self.c_class_bytes(&[hir::ClassBytesRange::new(b, b)]) } - fn c_class_bytes(&mut self, ranges: &[hir::ClassBytesRange]) -> Result { + fn c_class_bytes( + &mut self, + ranges: &[hir::ClassBytesRange], + ) -> ResultOrEmpty { debug_assert!(!ranges.is_empty()); let first_split_entry = self.insts.len(); @@ -451,35 +462,39 @@ self.push_hole(InstHole::Bytes { start: r.start(), end: r.end() }), ); self.fill(prev_hole, next); - Ok(Patch { hole: Hole::Many(holes), entry: first_split_entry }) + Ok(Some(Patch { hole: Hole::Many(holes), entry: first_split_entry })) } - fn c_empty_look(&mut self, look: EmptyLook) -> Result { + fn c_empty_look(&mut self, look: EmptyLook) -> ResultOrEmpty { let hole = 
self.push_hole(InstHole::EmptyLook { look: look }); - Ok(Patch { hole: hole, entry: self.insts.len() - 1 }) + Ok(Some(Patch { hole: hole, entry: self.insts.len() - 1 })) } - fn c_concat<'a, I>(&mut self, exprs: I) -> Result + fn c_concat<'a, I>(&mut self, exprs: I) -> ResultOrEmpty where I: IntoIterator, { let mut exprs = exprs.into_iter(); - let first = match exprs.next() { - Some(expr) => expr, - None => { - return Ok(Patch { hole: Hole::None, entry: self.insts.len() }) + let Patch { mut hole, entry } = loop { + match exprs.next() { + None => return Ok(None), + Some(e) => { + if let Some(p) = self.c(e)? { + break p; + } + } } }; - let Patch { mut hole, entry } = self.c(first)?; for e in exprs { - let p = self.c(e)?; - self.fill(hole, p.entry); - hole = p.hole; + if let Some(p) = self.c(e)? { + self.fill(hole, p.entry); + hole = p.hole; + } } - Ok(Patch { hole: hole, entry: entry }) + Ok(Some(Patch { hole: hole, entry: entry })) } - fn c_alternate(&mut self, exprs: &[Hir]) -> Result { + fn c_alternate(&mut self, exprs: &[Hir]) -> ResultOrEmpty { debug_assert!( exprs.len() >= 2, "alternates must have at least 2 exprs" @@ -492,43 +507,43 @@ // patched to point to the same location. let mut holes = vec![]; - let mut prev_hole = Hole::None; + // true indicates that the hole is a split where we want to fill + // the second branch. + let mut prev_hole = (Hole::None, false); for e in &exprs[0..exprs.len() - 1] { - self.fill_to_next(prev_hole); + if prev_hole.1 { + let next = self.insts.len(); + self.fill_split(prev_hole.0, None, Some(next)); + } else { + self.fill_to_next(prev_hole.0); + } let split = self.push_split_hole(); - let prev_entry = self.insts.len(); - let Patch { hole, entry } = self.c(e)?; - if prev_entry == self.insts.len() { - // TODO(burntsushi): It is kind of silly that we don't support - // empty-subexpressions in alternates, but it is supremely - // awkward to support them in the existing compiler - // infrastructure. This entire compiler needs to be thrown out - // anyway, so don't feel too bad. - return Err(Error::Syntax( - "alternations cannot currently contain \ - empty sub-expressions" - .to_string(), - )); + if let Some(Patch { hole, entry }) = self.c(e)? { + holes.push(hole); + prev_hole = (self.fill_split(split, Some(entry), None), false); + } else { + let (split1, split2) = split.dup_one(); + holes.push(split1); + prev_hole = (split2, true); } - holes.push(hole); - prev_hole = self.fill_split(split, Some(entry), None); } - let prev_entry = self.insts.len(); - let Patch { hole, entry } = self.c(&exprs[exprs.len() - 1])?; - if prev_entry == self.insts.len() { - // TODO(burntsushi): See TODO above. - return Err(Error::Syntax( - "alternations cannot currently contain \ - empty sub-expressions" - .to_string(), - )); + if let Some(Patch { hole, entry }) = self.c(&exprs[exprs.len() - 1])? { + holes.push(hole); + if prev_hole.1 { + self.fill_split(prev_hole.0, None, Some(entry)); + } else { + self.fill(prev_hole.0, entry); + } + } else { + // We ignore prev_hole.1. When it's true, it means we have two + // empty branches both pushing prev_hole.0 into holes, so both + // branches will go to the same place anyway. 
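The recurring edit in this hunk is a signature change: the `c_*` compile helpers now return `ResultOrEmpty` (a `Result` wrapping an `Option<Patch>`), so a sub-expression that emits no instructions can report `Ok(None)` instead of fabricating an entry point that really belongs to whatever instruction comes next. Below is a much-simplified illustration of that shape; `Patch`, `Node`, and `Compiler` here are invented stand-ins for the sketch, not the regex crate's real internals.

```rust
// Miniature version of the Option-returning compile pattern.
#[derive(Debug, Clone, Copy)]
struct Patch {
    entry: usize, // index of the first instruction this node emitted
}

enum Node {
    Empty,
    Char(char),
}

struct Compiler {
    insts: Vec<char>, // the "program" is just the emitted characters here
}

impl Compiler {
    // Like the reworked `c`: an Empty node emits nothing and returns
    // None, so callers never see an entry that points at someone
    // else's instruction.
    fn compile(&mut self, node: &Node) -> Option<Patch> {
        match node {
            Node::Empty => None,
            Node::Char(c) => {
                let entry = self.insts.len();
                self.insts.push(*c);
                Some(Patch { entry })
            }
        }
    }

    // Like the reworked `c_concat`: skip children that compiled to
    // nothing, and only return None if every child was empty.
    fn compile_concat(&mut self, nodes: &[Node]) -> Option<Patch> {
        let mut first: Option<Patch> = None;
        for n in nodes {
            if let Some(p) = self.compile(n) {
                first.get_or_insert(p);
            }
        }
        first
    }
}

fn main() {
    let mut c = Compiler { insts: Vec::new() };
    let patch = c.compile_concat(&[Node::Empty, Node::Char('a'), Node::Empty]);
    assert_eq!(patch.map(|p| p.entry), Some(0));
    assert_eq!(c.insts, vec!['a']);
}
```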
+ holes.push(prev_hole.0); } - holes.push(hole); - self.fill(prev_hole, entry); - Ok(Patch { hole: Hole::Many(holes), entry: first_split_entry }) + Ok(Some(Patch { hole: Hole::Many(holes), entry: first_split_entry })) } - fn c_repeat(&mut self, rep: &hir::Repetition) -> Result { + fn c_repeat(&mut self, rep: &hir::Repetition) -> ResultOrEmpty { use syntax::hir::RepetitionKind::*; match rep.kind { ZeroOrOne => self.c_repeat_zero_or_one(&rep.hir, rep.greedy), @@ -546,24 +561,37 @@ } } - fn c_repeat_zero_or_one(&mut self, expr: &Hir, greedy: bool) -> Result { + fn c_repeat_zero_or_one( + &mut self, + expr: &Hir, + greedy: bool, + ) -> ResultOrEmpty { let split_entry = self.insts.len(); let split = self.push_split_hole(); - let Patch { hole: hole_rep, entry: entry_rep } = self.c(expr)?; - + let Patch { hole: hole_rep, entry: entry_rep } = match self.c(expr)? { + Some(p) => p, + None => return self.pop_split_hole(), + }; let split_hole = if greedy { self.fill_split(split, Some(entry_rep), None) } else { self.fill_split(split, None, Some(entry_rep)) }; let holes = vec![hole_rep, split_hole]; - Ok(Patch { hole: Hole::Many(holes), entry: split_entry }) + Ok(Some(Patch { hole: Hole::Many(holes), entry: split_entry })) } - fn c_repeat_zero_or_more(&mut self, expr: &Hir, greedy: bool) -> Result { + fn c_repeat_zero_or_more( + &mut self, + expr: &Hir, + greedy: bool, + ) -> ResultOrEmpty { let split_entry = self.insts.len(); let split = self.push_split_hole(); - let Patch { hole: hole_rep, entry: entry_rep } = self.c(expr)?; + let Patch { hole: hole_rep, entry: entry_rep } = match self.c(expr)? { + Some(p) => p, + None => return self.pop_split_hole(), + }; self.fill(hole_rep, split_entry); let split_hole = if greedy { @@ -571,11 +599,18 @@ } else { self.fill_split(split, None, Some(entry_rep)) }; - Ok(Patch { hole: split_hole, entry: split_entry }) + Ok(Some(Patch { hole: split_hole, entry: split_entry })) } - fn c_repeat_one_or_more(&mut self, expr: &Hir, greedy: bool) -> Result { - let Patch { hole: hole_rep, entry: entry_rep } = self.c(expr)?; + fn c_repeat_one_or_more( + &mut self, + expr: &Hir, + greedy: bool, + ) -> ResultOrEmpty { + let Patch { hole: hole_rep, entry: entry_rep } = match self.c(expr)? { + Some(p) => p, + None => return Ok(None), + }; self.fill_to_next(hole_rep); let split = self.push_split_hole(); @@ -584,7 +619,7 @@ } else { self.fill_split(split, None, Some(entry_rep)) }; - Ok(Patch { hole: split_hole, entry: entry_rep }) + Ok(Some(Patch { hole: split_hole, entry: entry_rep })) } fn c_repeat_range_min_or_more( @@ -592,12 +627,20 @@ expr: &Hir, greedy: bool, min: u32, - ) -> Result { + ) -> ResultOrEmpty { let min = u32_to_usize(min); - let patch_concat = self.c_concat(iter::repeat(expr).take(min))?; - let patch_rep = self.c_repeat_zero_or_more(expr, greedy)?; - self.fill(patch_concat.hole, patch_rep.entry); - Ok(Patch { hole: patch_rep.hole, entry: patch_concat.entry }) + // Using next_inst() is ok, because we can't return it (concat would + // have to return Some(_) while c_repeat_range_min_or_more returns + // None). + let patch_concat = self + .c_concat(iter::repeat(expr).take(min))? + .unwrap_or(self.next_inst()); + if let Some(patch_rep) = self.c_repeat_zero_or_more(expr, greedy)? 
{ + self.fill(patch_concat.hole, patch_rep.entry); + Ok(Some(Patch { hole: patch_rep.hole, entry: patch_concat.entry })) + } else { + Ok(None) + } } fn c_repeat_range( @@ -606,13 +649,17 @@ greedy: bool, min: u32, max: u32, - ) -> Result { + ) -> ResultOrEmpty { let (min, max) = (u32_to_usize(min), u32_to_usize(max)); + debug_assert!(min <= max); let patch_concat = self.c_concat(iter::repeat(expr).take(min))?; - let initial_entry = patch_concat.entry; if min == max { return Ok(patch_concat); } + // Same reasoning as in c_repeat_range_min_or_more (we know that min < + // max at this point). + let patch_concat = patch_concat.unwrap_or(self.next_inst()); + let initial_entry = patch_concat.entry; // It is much simpler to compile, e.g., `a{2,5}` as: // // aaa?a?a? @@ -637,7 +684,10 @@ for _ in min..max { self.fill_to_next(prev_hole); let split = self.push_split_hole(); - let Patch { hole, entry } = self.c(expr)?; + let Patch { hole, entry } = match self.c(expr)? { + Some(p) => p, + None => return self.pop_split_hole(), + }; prev_hole = hole; if greedy { holes.push(self.fill_split(split, Some(entry), None)); @@ -646,7 +696,14 @@ } } holes.push(prev_hole); - Ok(Patch { hole: Hole::Many(holes), entry: initial_entry }) + Ok(Some(Patch { hole: Hole::Many(holes), entry: initial_entry })) + } + + /// Can be used as a default value for the c_* functions when the call to + /// c_function is followed by inserting at least one instruction that is + /// always executed after the ones written by the c* function. + fn next_inst(&self) -> Patch { + Patch { hole: Hole::None, entry: self.insts.len() } } fn fill(&mut self, hole: Hole, goto: InstPtr) { @@ -726,6 +783,11 @@ Hole::One(hole) } + fn pop_split_hole(&mut self) -> ResultOrEmpty { + self.insts.pop(); + Ok(None) + } + fn check_size(&self) -> result::Result<(), Error> { use std::mem::size_of; @@ -744,6 +806,17 @@ Many(Vec), } +impl Hole { + fn dup_one(self) -> (Self, Self) { + match self { + Hole::One(pc) => (Hole::One(pc), Hole::One(pc)), + Hole::None | Hole::Many(_) => { + unreachable!("must be called on single hole") + } + } + } +} + #[derive(Clone, Debug)] enum MaybeInst { Compiled(Inst), @@ -755,13 +828,22 @@ impl MaybeInst { fn fill(&mut self, goto: InstPtr) { - let filled = match *self { - MaybeInst::Uncompiled(ref inst) => inst.fill(goto), + let maybeinst = match *self { + MaybeInst::Split => MaybeInst::Split1(goto), + MaybeInst::Uncompiled(ref inst) => { + MaybeInst::Compiled(inst.fill(goto)) + } MaybeInst::Split1(goto1) => { - Inst::Split(InstSplit { goto1: goto1, goto2: goto }) + MaybeInst::Compiled(Inst::Split(InstSplit { + goto1: goto1, + goto2: goto, + })) } MaybeInst::Split2(goto2) => { - Inst::Split(InstSplit { goto1: goto, goto2: goto2 }) + MaybeInst::Compiled(Inst::Split(InstSplit { + goto1: goto, + goto2: goto2, + })) } _ => unreachable!( "not all instructions were compiled! \ @@ -769,7 +851,7 @@ self ), }; - *self = MaybeInst::Compiled(filled); + *self = maybeinst; } fn fill_split(&mut self, goto1: InstPtr, goto2: InstPtr) { diff -Nru cargo-0.44.1/vendor/regex/src/lib.rs cargo-0.47.0/vendor/regex/src/lib.rs --- cargo-0.44.1/vendor/regex/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -152,8 +152,9 @@ ``` If you wish to match against whitespace in this mode, you can still use `\s`, -`\n`, `\t`, etc. For escaping a single space character, you can use its hex -character code `\x20` or temporarily disable the `x` flag, e.g., `(?-x: )`. +`\n`, `\t`, etc. 
For escaping a single space character, you can escape it +directly with `\ `, use its hex character code `\x20` or temporarily disable +the `x` flag, e.g., `(?-x: )`. # Example: match multiple regular expressions simultaneously @@ -621,8 +622,8 @@ #[cfg(feature = "perf-literal")] extern crate aho_corasick; -#[cfg(test)] -extern crate doc_comment; +// #[cfg(doctest)] +// extern crate doc_comment; #[cfg(feature = "perf-literal")] extern crate memchr; #[cfg(test)] @@ -632,8 +633,8 @@ #[cfg(feature = "perf-cache")] extern crate thread_local; -#[cfg(test)] -doc_comment::doctest!("../README.md"); +// #[cfg(doctest)] +// doc_comment::doctest!("../README.md"); #[cfg(feature = "std")] pub use error::Error; diff -Nru cargo-0.44.1/vendor/regex/tests/crazy.rs cargo-0.47.0/vendor/regex/tests/crazy.rs --- cargo-0.44.1/vendor/regex/tests/crazy.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/tests/crazy.rs 2020-10-01 21:38:28.000000000 +0000 @@ -118,6 +118,18 @@ matiter!(match_empty9, r"z|()+", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); matiter!(match_empty10, r"()+|b", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); matiter!(match_empty11, r"b|()+", "abc", (0, 0), (1, 2), (3, 3)); +matiter!(match_empty12, r"|b", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty13, r"b|", "abc", (0, 0), (1, 2), (3, 3)); +matiter!(match_empty14, r"|z", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty15, r"z|", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty16, r"|", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty17, r"||", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty18, r"||z", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty19, r"(?:)|b", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty20, r"b|(?:)", "abc", (0, 0), (1, 2), (3, 3)); +matiter!(match_empty21, r"(?:|)", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty22, r"(?:|)|z", "abc", (0, 0), (1, 1), (2, 2), (3, 3)); +matiter!(match_empty23, r"a(?:)|b", "abc", (0, 1), (1, 2)); // Test that the DFA can handle pathological cases. 
// (This should result in the DFA's cache being flushed too frequently, which diff -Nru cargo-0.44.1/vendor/regex/tests/noparse.rs cargo-0.47.0/vendor/regex/tests/noparse.rs --- cargo-0.44.1/vendor/regex/tests/noparse.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/tests/noparse.rs 2020-10-01 21:38:28.000000000 +0000 @@ -26,6 +26,8 @@ noparse!(fail_bad_flag, "(?a)a"); noparse!(fail_too_big, "a{10000000}"); noparse!(fail_counted_no_close, "a{1001"); +noparse!(fail_counted_decreasing, "a{2,1}"); +noparse!(fail_counted_nonnegative, "a{-1,1}"); noparse!(fail_unfinished_cap, "(?"); noparse!(fail_unfinished_escape, "\\"); noparse!(fail_octal_digit, r"\8"); @@ -41,10 +43,3 @@ noparse!(fail_range_end_no_begin, r"[a-\A]"); noparse!(fail_range_end_no_end, r"[a-\z]"); noparse!(fail_range_end_no_boundary, r"[a-\b]"); -noparse!(fail_empty_alt1, r"|z"); -noparse!(fail_empty_alt2, r"z|"); -noparse!(fail_empty_alt3, r"|"); -noparse!(fail_empty_alt4, r"||"); -noparse!(fail_empty_alt5, r"()|z"); -noparse!(fail_empty_alt6, r"z|()"); -noparse!(fail_empty_alt7, r"(|)"); diff -Nru cargo-0.44.1/vendor/regex/tests/regression.rs cargo-0.47.0/vendor/regex/tests/regression.rs --- cargo-0.44.1/vendor/regex/tests/regression.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/tests/regression.rs 2020-10-01 21:38:28.000000000 +0000 @@ -210,3 +210,10 @@ (4, 7), (12, 15) ); + +// See: https://github.com/rust-lang/regex/issues/659 +// +// Note that 'Ј' is not 'j', but cyrillic Je +// https://en.wikipedia.org/wiki/Je_(Cyrillic) +ismatch!(empty_group_match, r"()Ј01", "zЈ01", true); +matiter!(empty_group_find, r"()Ј01", "zЈ01", (1, 5)); diff -Nru cargo-0.44.1/vendor/regex/tests/set.rs cargo-0.47.0/vendor/regex/tests/set.rs --- cargo-0.44.1/vendor/regex/tests/set.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex/tests/set.rs 2020-10-01 21:38:28.000000000 +0000 @@ -17,6 +17,17 @@ matset!(set17, &[".*a"], "a", 0); matset!(set18, &["a", "β"], "β", 1); +// regexes that match the empty string +matset!(setempty1, &["", "a"], "abc", 0, 1); +matset!(setempty2, &["", "b"], "abc", 0, 1); +matset!(setempty3, &["", "z"], "abc", 0); +matset!(setempty4, &["a", ""], "abc", 0, 1); +matset!(setempty5, &["b", ""], "abc", 0, 1); +matset!(setempty6, &["z", ""], "abc", 1); +matset!(setempty7, &["b", "(?:)"], "abc", 0, 1); +matset!(setempty8, &["(?:)", "b"], "abc", 0, 1); +matset!(setempty9, &["c(?:)", "b"], "abc", 0, 1); + nomatset!(nset1, &["a", "a"], "b"); nomatset!(nset2, &["^foo", "bar$"], "bar foo"); nomatset!( diff -Nru cargo-0.44.1/vendor/regex-syntax/.cargo-checksum.json cargo-0.47.0/vendor/regex-syntax/.cargo-checksum.json --- cargo-0.44.1/vendor/regex-syntax/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"} \ No newline at end of file +{"files":{},"package":"26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/regex-syntax/Cargo.toml cargo-0.47.0/vendor/regex-syntax/Cargo.toml --- cargo-0.44.1/vendor/regex-syntax/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "regex-syntax" -version = "0.6.17" +version = "0.6.18" authors = ["The Rust Project Developers"] description = "A regular expression 
parser." homepage = "https://github.com/rust-lang/regex" diff -Nru cargo-0.44.1/vendor/regex-syntax/src/hir/mod.rs cargo-0.47.0/vendor/regex-syntax/src/hir/mod.rs --- cargo-0.44.1/vendor/regex-syntax/src/hir/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/hir/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -241,8 +241,8 @@ info.set_any_anchored_start(false); info.set_any_anchored_end(false); info.set_match_empty(true); - info.set_literal(true); - info.set_alternation_literal(true); + info.set_literal(false); + info.set_alternation_literal(false); Hir { kind: HirKind::Empty, info: info } } @@ -671,8 +671,8 @@ /// true when this HIR expression is either itself a `Literal` or a /// concatenation of only `Literal`s. /// - /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()` - /// are not (even though that contain sub-expressions that are literals). + /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()`, + /// `` are not (even though that contain sub-expressions that are literals). pub fn is_literal(&self) -> bool { self.info.is_literal() } @@ -682,8 +682,8 @@ /// true when this HIR expression is either itself a `Literal` or a /// concatenation of only `Literal`s or an alternation of only `Literal`s. /// - /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternaiton - /// literals, but `f+`, `(foo)`, `foo()` + /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternation + /// literals, but `f+`, `(foo)`, `foo()`, `` /// are not (even though that contain sub-expressions that are literals). pub fn is_alternation_literal(&self) -> bool { self.info.is_alternation_literal() @@ -1496,7 +1496,7 @@ self.bools &= !(1 << $bit); } } - } + }; } impl HirInfo { diff -Nru cargo-0.44.1/vendor/regex-syntax/src/hir/translate.rs cargo-0.47.0/vendor/regex-syntax/src/hir/translate.rs --- cargo-0.44.1/vendor/regex-syntax/src/hir/translate.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/hir/translate.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3105,13 +3105,13 @@ #[test] fn analysis_is_literal() { // Positive examples. - assert!(t(r"").is_literal()); assert!(t(r"a").is_literal()); assert!(t(r"ab").is_literal()); assert!(t(r"abc").is_literal()); assert!(t(r"(?m)abc").is_literal()); // Negative examples. + assert!(!t(r"").is_literal()); assert!(!t(r"^").is_literal()); assert!(!t(r"a|b").is_literal()); assert!(!t(r"(a)").is_literal()); @@ -3124,7 +3124,6 @@ #[test] fn analysis_is_alternation_literal() { // Positive examples. - assert!(t(r"").is_alternation_literal()); assert!(t(r"a").is_alternation_literal()); assert!(t(r"ab").is_alternation_literal()); assert!(t(r"abc").is_alternation_literal()); @@ -3135,6 +3134,7 @@ assert!(t(r"foo|bar|baz").is_alternation_literal()); // Negative examples. + assert!(!t(r"").is_alternation_literal()); assert!(!t(r"^").is_alternation_literal()); assert!(!t(r"(a)").is_alternation_literal()); assert!(!t(r"a+").is_alternation_literal()); diff -Nru cargo-0.44.1/vendor/regex-syntax/src/lib.rs cargo-0.47.0/vendor/regex-syntax/src/lib.rs --- cargo-0.44.1/vendor/regex-syntax/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -175,7 +175,7 @@ /// The string returned may be safely used as a literal in a regular /// expression. 
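The `hir/mod.rs` and `translate.rs` changes above flip how the empty pattern is classified: `Hir::is_literal` and `Hir::is_alternation_literal` now return false for `""`, in line with the updated tests. A short check of that behaviour against the public `regex-syntax` 0.6 parser API (the particular patterns are illustrative; the assertions mirror the test changes shown above):

```rust
use regex_syntax::Parser;

fn main() {
    let mut parser = Parser::new();

    // "foo" is still classified as a literal...
    assert!(parser.parse("foo").unwrap().is_literal());

    // ...but after this change the empty pattern is not, even though it
    // still matches the empty string.
    assert!(!parser.parse("").unwrap().is_literal());
    assert!(!parser.parse("").unwrap().is_alternation_literal());
}
```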
pub fn escape(text: &str) -> String { - let mut quoted = String::with_capacity(text.len()); + let mut quoted = String::new(); escape_into(text, &mut quoted); quoted } @@ -185,6 +185,7 @@ /// This will append escape characters into the given buffer. The characters /// that are appended are safe to use as a literal in a regular expression. pub fn escape_into(text: &str, buf: &mut String) { + buf.reserve(text.len()); for c in text.chars() { if is_meta_character(c) { buf.push('\\'); diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/age.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/age.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/age.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/age.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate age ucd-13.0.0 --chars +// ucd-generate age ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("V10_0", V10_0), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate case-folding-simple ucd-13.0.0 --chars --all-pairs +// ucd-generate case-folding-simple ucd-13.0.0 --chars --all-pairs // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const CASE_FOLDING_SIMPLE: &'static [(char, &'static [char])] = &[ ('A', &['a']), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/general_category.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/general_category.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/general_category.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/general_category.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate general-category ucd-13.0.0 --chars --exclude surrogate +// ucd-generate general-category ucd-13.0.0 --chars --exclude surrogate // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("Cased_Letter", CASED_LETTER), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate grapheme-cluster-break ucd-13.0.0 --chars +// ucd-generate grapheme-cluster-break ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. 
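The `escape`/`escape_into` tweak just above moves the capacity reservation from `escape` into `escape_into`, so callers that append into an existing buffer also get the `reserve`. A brief usage sketch of the two public functions; the concrete inputs and expected outputs are illustrative, based on the documented behaviour that regex meta characters (and only those) are backslash-escaped.

```rust
fn main() {
    // escape() returns a new String with regex meta characters escaped.
    let quoted = regex_syntax::escape("1 + (2 * 3)");
    assert_eq!(quoted, r"1 \+ \(2 \* 3\)");

    // escape_into() appends into a caller-provided buffer; after this
    // change it reserves the extra capacity itself.
    let mut buf = String::from("^");
    regex_syntax::escape_into("a.b", &mut buf);
    buf.push('$');
    assert_eq!(buf, r"^a\.b$");
}
```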
+// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("CR", CR), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate general-category ucd-13.0.0 --chars --include decimalnumber +// ucd-generate general-category ucd-13.0.0 --chars --include decimalnumber // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[("Decimal_Number", DECIMAL_NUMBER)]; diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/perl_space.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/perl_space.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/perl_space.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/perl_space.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate property-bool ucd-13.0.0 --chars --include whitespace +// ucd-generate property-bool ucd-13.0.0 --chars --include whitespace // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[("White_Space", WHITE_SPACE)]; diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/perl_word.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/perl_word.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/perl_word.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/perl_word.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate perl-word ucd-13.0.0 --chars +// ucd-generate perl-word ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const PERL_WORD: &'static [(char, char)] = &[ ('0', '9'), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/property_bool.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/property_bool.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/property_bool.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/property_bool.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate property-bool ucd-13.0.0 --chars +// ucd-generate property-bool ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. 
pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("ASCII_Hex_Digit", ASCII_HEX_DIGIT), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/property_names.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/property_names.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/property_names.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/property_names.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate property-names ucd-13.0.0 +// ucd-generate property-names ucd-13.0.0 // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const PROPERTY_NAMES: &'static [(&'static str, &'static str)] = &[ ("age", "Age"), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/property_values.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/property_values.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/property_values.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/property_values.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate property-values ucd-13.0.0 --include gc,script,scx,age,gcb,wb,sb +// ucd-generate property-values ucd-13.0.0 --include gc,script,scx,age,gcb,wb,sb // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const PROPERTY_VALUES: &'static [( &'static str, diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/script_extension.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/script_extension.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/script_extension.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/script_extension.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate script-extension ucd-13.0.0 --chars +// ucd-generate script-extension ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("Adlam", ADLAM), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/script.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/script.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/script.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/script.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate script ucd-13.0.0 --chars +// ucd-generate script ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. 
pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("Adlam", ADLAM), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/sentence_break.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/sentence_break.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/sentence_break.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/sentence_break.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate sentence-break ucd-13.0.0 --chars +// ucd-generate sentence-break ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("ATerm", ATERM), diff -Nru cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/word_break.rs cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/word_break.rs --- cargo-0.44.1/vendor/regex-syntax/src/unicode_tables/word_break.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/regex-syntax/src/unicode_tables/word_break.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,10 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// ucd-generate word-break ucd-13.0.0 --chars +// ucd-generate word-break ucd-13.0.0 --chars // -// ucd-generate 0.2.7 is available on crates.io. +// Unicode version: 13.0.0. +// +// ucd-generate 0.2.8 is available on crates.io. pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ ("ALetter", ALETTER), diff -Nru cargo-0.44.1/vendor/remove_dir_all/.cargo-checksum.json cargo-0.47.0/vendor/remove_dir_all/.cargo-checksum.json --- cargo-0.44.1/vendor/remove_dir_all/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/remove_dir_all/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"} \ No newline at end of file +{"files":{},"package":"3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/remove_dir_all/Cargo.toml cargo-0.47.0/vendor/remove_dir_all/Cargo.toml --- cargo-0.44.1/vendor/remove_dir_all/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/remove_dir_all/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,9 +12,9 @@ [package] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" authors = ["Aaronepower "] -include = ["Cargo.toml", "LICENCE-APACHE", "LICENCE-MIT", "src/**/*"] +include = ["Cargo.toml", "LICENCE-APACHE", "LICENCE-MIT", "src/**/*", "README.md"] description = "A safe, reliable implementation of remove_dir_all for Windows" readme = "README.md" keywords = ["utility", "filesystem", "remove_dir", "windows"] diff -Nru cargo-0.44.1/vendor/remove_dir_all/README.md cargo-0.47.0/vendor/remove_dir_all/README.md --- cargo-0.44.1/vendor/remove_dir_all/README.md 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/remove_dir_all/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,20 @@ +# remove_dir_all + +[![Latest Version](https://img.shields.io/crates/v/remove_dir_all.svg)](https://crates.io/crates/remove_dir_all) +[![Docs](https://docs.rs/remove_dir_all/badge.svg)](https://docs.rs/remove_dir_all) +[![License](https://img.shields.io/github/license/XAMPPRocky/remove_dir_all.svg)](https://github.com/XAMPPRocky/remove_dir_all) + +## 
Description + +A reliable implementation of `remove_dir_all` for Windows. For Unix systems +re-exports `std::fs::remove_dir_all`. + +```rust,no_run +extern crate remove_dir_all; + +use remove_dir_all::*; + +fn main() { + remove_dir_all("./temp/").unwrap(); +} +``` diff -Nru cargo-0.44.1/vendor/remove_dir_all/src/fs.rs cargo-0.47.0/vendor/remove_dir_all/src/fs.rs --- cargo-0.44.1/vendor/remove_dir_all/src/fs.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/remove_dir_all/src/fs.rs 2020-10-01 21:38:28.000000000 +0000 @@ -20,7 +20,8 @@ counter: u64, } -/// Reliably removes directory and all of it's children. +/// Reliably removes a directory and all of its children. +/// /// ```rust /// extern crate remove_dir_all; /// diff -Nru cargo-0.44.1/vendor/remove_dir_all/src/lib.rs cargo-0.47.0/vendor/remove_dir_all/src/lib.rs --- cargo-0.44.1/vendor/remove_dir_all/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/remove_dir_all/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,11 +1,19 @@ +//! Reliably remove a directory and all of its children. +//! +//! This library provides a reliable implementation of `remove_dir_all` for Windows. +//! For Unix systems, it re-exports `std::fs::remove_dir_all`. + +#![deny(missing_debug_implementations)] +#![deny(missing_docs)] + #[cfg(windows)] extern crate winapi; -#[cfg(test)] +#[cfg(doctest)] #[macro_use] extern crate doc_comment; -#[cfg(test)] +#[cfg(doctest)] doctest!("../README.md"); #[cfg(windows)] diff -Nru cargo-0.44.1/vendor/ryu/benches/bench.rs cargo-0.47.0/vendor/ryu/benches/bench.rs --- cargo-0.44.1/vendor/ryu/benches/bench.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/benches/bench.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2,18 +2,19 @@ #![feature(test)] -extern crate ryu; extern crate test; +use std::io::Write; +use std::{f32, f64}; +use test::{black_box, Bencher}; + macro_rules! benches { ($($name:ident($value:expr),)*) => { mod bench_ryu { - use test::{Bencher, black_box}; + use super::*; $( #[bench] fn $name(b: &mut Bencher) { - use ryu; - let mut buf = ryu::Buffer::new(); b.iter(move || { @@ -26,12 +27,10 @@ } mod bench_std_fmt { - use test::{Bencher, black_box}; + use super::*; $( #[bench] fn $name(b: &mut Bencher) { - use std::io::Write; - let mut buf = Vec::with_capacity(20); b.iter(|| { @@ -43,16 +42,16 @@ } )* } - } + }; } -benches!( +benches! { bench_0_f64(0f64), bench_short_f64(0.1234f64), bench_e_f64(2.718281828459045f64), - bench_max_f64(::std::f64::MAX), + bench_max_f64(f64::MAX), bench_0_f32(0f32), bench_short_f32(0.1234f32), bench_e_f32(2.718281828459045f32), - bench_max_f32(::std::f32::MAX), -); + bench_max_f32(f32::MAX), +} diff -Nru cargo-0.44.1/vendor/ryu/build.rs cargo-0.47.0/vendor/ryu/build.rs --- cargo-0.44.1/vendor/ryu/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,21 +14,12 @@ let target = env::var("TARGET").unwrap(); let emscripten = target == "asmjs-unknown-emscripten" || target == "wasm32-unknown-emscripten"; - // 128-bit integers stabilized in Rust 1.26: - // https://blog.rust-lang.org/2018/05/10/Rust-1.26.html - // - // Disabled on Emscripten targets as Emscripten doesn't + // 128-bit integers disabled on Emscripten targets as Emscripten doesn't // currently support integers larger than 64 bits. 
- if minor >= 26 && !emscripten { + if !emscripten { println!("cargo:rustc-cfg=integer128"); } - // #[must_use] on functions stabilized in Rust 1.27: - // https://blog.rust-lang.org/2018/06/21/Rust-1.27.html - if minor >= 27 { - println!("cargo:rustc-cfg=must_use_return"); - } - // MaybeUninit stabilized in Rust 1.36: // https://blog.rust-lang.org/2019/07/04/Rust-1.36.0.html if minor >= 36 { @@ -37,30 +28,13 @@ } fn rustc_minor_version() -> Option { - let rustc = match env::var_os("RUSTC") { - Some(rustc) => rustc, - None => return None, - }; - - let output = match Command::new(rustc).arg("--version").output() { - Ok(output) => output, - Err(_) => return None, - }; - - let version = match str::from_utf8(&output.stdout) { - Ok(version) => version, - Err(_) => return None, - }; - + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; let mut pieces = version.split('.'); if pieces.next() != Some("rustc 1") { return None; } - - let next = match pieces.next() { - Some(next) => next, - None => return None, - }; - + let next = pieces.next()?; u32::from_str(next).ok() } diff -Nru cargo-0.44.1/vendor/ryu/.cargo-checksum.json cargo-0.47.0/vendor/ryu/.cargo-checksum.json --- cargo-0.44.1/vendor/ryu/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1"} \ No newline at end of file +{"files":{},"package":"71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/ryu/Cargo.lock cargo-0.47.0/vendor/ryu/Cargo.lock --- cargo-0.44.1/vendor/ryu/Cargo.lock 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/Cargo.lock 2020-10-01 21:38:28.000000000 +0000 @@ -11,50 +11,50 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)", "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hermit-abi" -version = "0.1.10" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libc" -version = "0.2.68" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "no-panic" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.29 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 
0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ppv-lite86" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "proc-macro2" -version = "1.0.10" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -62,10 +62,10 @@ [[package]] name = "quote" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -74,7 +74,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -85,7 +85,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -115,21 +115,21 @@ [[package]] name = "ryu" -version = "1.0.4" +version = "1.0.5" dependencies = [ - "no-panic 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "no-panic 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syn" -version = "1.0.17" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -146,18 +146,18 @@ [metadata] "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" "checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" -"checksum libc 0.2.68 
(registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" -"checksum no-panic 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "a11c6db47b62f887ae15a0d02c5b24eb9a815536812bc46ca45305a6d22e5675" -"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" -"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" -"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" +"checksum hermit-abi 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71" +"checksum libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)" = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" +"checksum no-panic 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "b4da21758193a9f8a8f1dc698f121e70e8df07c71e4968e49762a5604bbdf72b" +"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +"checksum ppv-lite86 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +"checksum proc-macro2 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "1502d12e458c49a4c9cbff560d0fe0060c252bc29799ed94ca2ed4bb665a0101" +"checksum quote 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" "checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" "checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" "checksum rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8" -"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" +"checksum syn 1.0.29 (registry+https://github.com/rust-lang/crates.io-index)" = "bb37da98a55b1d08529362d9cbb863be17556873df2585904ab9d2bc951291d0" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" diff -Nru cargo-0.44.1/vendor/ryu/Cargo.toml cargo-0.47.0/vendor/ryu/Cargo.toml --- cargo-0.44.1/vendor/ryu/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/Cargo.toml 2020-10-01 
21:38:28.000000000 +0000 @@ -11,10 +11,10 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "ryu" -version = "1.0.4" +version = "1.0.5" authors = ["David Tolnay "] -build = "build.rs" description = "Fast floating point to string conversion" documentation = "https://docs.rs/ryu" readme = "README.md" @@ -36,5 +36,3 @@ [features] small = [] -[badges.travis-ci] -repository = "dtolnay/ryu" diff -Nru cargo-0.44.1/vendor/ryu/examples/upstream_benchmark.rs cargo-0.47.0/vendor/ryu/examples/upstream_benchmark.rs --- cargo-0.44.1/vendor/ryu/examples/upstream_benchmark.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/examples/upstream_benchmark.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,5 @@ // cargo run --example upstream_benchmark --release -extern crate rand; -extern crate ryu; - use rand::{Rng, SeedableRng}; const SAMPLES: usize = 10000; diff -Nru cargo-0.44.1/vendor/ryu/README.md cargo-0.47.0/vendor/ryu/README.md --- cargo-0.44.1/vendor/ryu/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,9 +1,9 @@ # Ryū -[![Build Status](https://api.travis-ci.org/dtolnay/ryu.svg?branch=master)](https://travis-ci.org/dtolnay/ryu) -[![Latest Version](https://img.shields.io/crates/v/ryu.svg)](https://crates.io/crates/ryu) -[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/ryu) -[![Rustc Version 1.15+](https://img.shields.io/badge/rustc-1.15+-lightgray.svg)](https://blog.rust-lang.org/2017/02/02/Rust-1.15.html) +[github](https://github.com/dtolnay/ryu) +[crates.io](https://crates.io/crates/ryu) +[docs.rs](https://docs.rs/ryu) +[build status](https://github.com/dtolnay/ryu/actions?query=branch%3Amaster) Pure Rust implementation of Ryū, an algorithm to quickly convert floating point numbers to decimal strings. @@ -15,11 +15,11 @@ This Rust implementation is a line-by-line port of Ulf Adams' implementation in C, [https://github.com/ulfjack/ryu][upstream]. -*Requirements: this crate supports any compiler version back to rustc 1.15; it +*Requirements: this crate supports any compiler version back to rustc 1.31; it uses nothing from the Rust standard library so is usable from no_std crates.* [paper]: https://dl.acm.org/citation.cfm?id=3192369 -[upstream]: https://github.com/ulfjack/ryu/tree/688f43b62276b400728baad54afc32c3ab9c1a95 +[upstream]: https://github.com/ulfjack/ryu/tree/1c413e127f8d02afd12eb6259bc80163722f1385 ```toml [dependencies] @@ -69,7 +69,7 @@ $ cargo bench ``` -The benchmark shows Ryu approximately 4-10x faster than the standard library +The benchmark shows Ryū approximately 4-10x faster than the standard library across a range of f32 and f64 inputs. Measurements are in nanoseconds per iteration; smaller is better. 
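For readers skimming the ryu hunks, here is a minimal usage sketch of the crate's public API. It mirrors the doc example visible in the src/buffer/mod.rs hunk just below and assumes the vendored ryu 1.0.5; the `format` call and the note about NaN/infinity handling come from the crate's documented behaviour and are not part of this diff.

```rust
// Minimal sketch, assuming `ryu = "1.0"` is available as a dependency.
fn main() {
    // Buffer is a small stack-allocated scratch area reused across conversions.
    let mut buffer = ryu::Buffer::new();

    // format() accepts any f32/f64 value, including NaN and the infinities.
    let printed = buffer.format(1.234);
    assert_eq!(printed, "1.234");

    // format_finite() skips the non-finite checks; the caller must pass a
    // finite value (this is the example shown in the buffer module hunk below).
    let finite = buffer.format_finite(2.718281828459045_f64);
    assert_eq!(finite, "2.718281828459045");
}
```

Note that the src/buffer/mod.rs hunk below also replaces the derived `Copy`/`Clone` with manual impls in which `clone()` simply returns `Buffer::new()`, so a cloned buffer starts empty rather than preserving previously formatted bytes.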
diff -Nru cargo-0.44.1/vendor/ryu/src/buffer/mod.rs cargo-0.47.0/vendor/ryu/src/buffer/mod.rs --- cargo-0.44.1/vendor/ryu/src/buffer/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/buffer/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,10 +1,7 @@ -use core::{mem, slice, str}; - +use crate::raw; #[cfg(maybe_uninit)] use core::mem::MaybeUninit; - -use raw; - +use core::{mem, slice, str}; #[cfg(feature = "no-panic")] use no_panic::no_panic; @@ -16,12 +13,11 @@ /// /// ## Example /// -/// ```edition2018 +/// ``` /// let mut buffer = ryu::Buffer::new(); /// let printed = buffer.format_finite(1.234); /// assert_eq!(printed, "1.234"); /// ``` -#[derive(Copy, Clone)] pub struct Buffer { #[cfg(maybe_uninit)] bytes: [MaybeUninit; 24], @@ -93,6 +89,15 @@ } } +impl Copy for Buffer {} + +impl Clone for Buffer { + #[inline] + fn clone(&self) -> Self { + Buffer::new() + } +} + impl Default for Buffer { #[inline] #[cfg_attr(feature = "no-panic", no_panic)] diff -Nru cargo-0.44.1/vendor/ryu/src/common.rs cargo-0.47.0/vendor/ryu/src/common.rs --- cargo-0.44.1/vendor/ryu/src/common.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/common.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,6 +18,8 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. +// Returns the number of decimal digits in v, which must not contain more than 9 +// digits. #[cfg_attr(feature = "no-panic", inline)] pub fn decimal_length9(v: u32) -> u32 { // Function precondition: v is not a 10-digit number. @@ -45,29 +47,47 @@ } } -// Returns e == 0 ? 1 : ceil(log_2(5^e)). +// Returns e == 0 ? 1 : [log_2(5^e)]; requires 0 <= e <= 3528. #[cfg_attr(feature = "no-panic", inline)] -pub fn pow5bits(e: i32) -> i32 { - // This approximation works up to the point that the multiplication overflows at e = 3529. - // If the multiplication were done in 64 bits, it would fail at 5^4004 which is just greater - // than 2^9297. +#[allow(dead_code)] +pub fn log2_pow5(e: i32) -> i32 /* or u32 -> u32 */ { + // This approximation works up to the point that the multiplication + // overflows at e = 3529. If the multiplication were done in 64 bits, it + // would fail at 5^4004 which is just greater than 2^9297. + debug_assert!(e >= 0); + debug_assert!(e <= 3528); + ((e as u32 * 1217359) >> 19) as i32 +} + +// Returns e == 0 ? 1 : ceil(log_2(5^e)); requires 0 <= e <= 3528. +#[cfg_attr(feature = "no-panic", inline)] +pub fn pow5bits(e: i32) -> i32 /* or u32 -> u32 */ { + // This approximation works up to the point that the multiplication + // overflows at e = 3529. If the multiplication were done in 64 bits, it + // would fail at 5^4004 which is just greater than 2^9297. debug_assert!(e >= 0); debug_assert!(e <= 3528); (((e as u32 * 1217359) >> 19) + 1) as i32 } -// Returns floor(log_10(2^e)). #[cfg_attr(feature = "no-panic", inline)] -pub fn log10_pow2(e: i32) -> u32 { +#[allow(dead_code)] +pub fn ceil_log2_pow5(e: i32) -> i32 /* or u32 -> u32 */ { + log2_pow5(e) + 1 +} + +// Returns floor(log_10(2^e)); requires 0 <= e <= 1650. +#[cfg_attr(feature = "no-panic", inline)] +pub fn log10_pow2(e: i32) -> u32 /* or u32 -> u32 */ { // The first value this approximation fails for is 2^1651 which is just greater than 10^297. debug_assert!(e >= 0); debug_assert!(e <= 1650); (e as u32 * 78913) >> 18 } -// Returns floor(log_10(5^e)). +// Returns floor(log_10(5^e)); requires 0 <= e <= 2620. 
#[cfg_attr(feature = "no-panic", inline)] -pub fn log10_pow5(e: i32) -> u32 { +pub fn log10_pow5(e: i32) -> u32 /* or u32 -> u32 */ { // The first value this approximation fails for is 5^2621 which is just greater than 10^1832. debug_assert!(e >= 0); debug_assert!(e <= 2620); diff -Nru cargo-0.44.1/vendor/ryu/src/d2s_full_table.rs cargo-0.47.0/vendor/ryu/src/d2s_full_table.rs --- cargo-0.44.1/vendor/ryu/src/d2s_full_table.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/d2s_full_table.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,626 +18,679 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. -pub static DOUBLE_POW5_INV_SPLIT: [(u64, u64); 292] = [ - (1, 288230376151711744), - (3689348814741910324, 230584300921369395), - (2951479051793528259, 184467440737095516), - (17118578500402463900, 147573952589676412), - (12632330341676300947, 236118324143482260), - (10105864273341040758, 188894659314785808), - (15463389048156653253, 151115727451828646), - (17362724847566824558, 241785163922925834), - (17579528692795369969, 193428131138340667), - (6684925324752475329, 154742504910672534), - (18074578149087781173, 247588007857076054), - (18149011334012135262, 198070406285660843), - (3451162622983977240, 158456325028528675), - (5521860196774363583, 253530120045645880), - (4417488157419490867, 202824096036516704), - (7223339340677503017, 162259276829213363), - (7867994130342094503, 259614842926741381), - (2605046489531765280, 207691874341393105), - (2084037191625412224, 166153499473114484), - (10713157136084480204, 265845599156983174), - (12259874523609494487, 212676479325586539), - (13497248433629505913, 170141183460469231), - (14216899864323388813, 272225893536750770), - (11373519891458711051, 217780714829400616), - (5409467098425058518, 174224571863520493), - (4965798542738183305, 278759314981632789), - (7661987648932456967, 223007451985306231), - (2440241304404055250, 178405961588244985), - (3904386087046488400, 285449538541191976), - (17880904128604832013, 228359630832953580), - (14304723302883865611, 182687704666362864), - (15133127457049002812, 146150163733090291), - (16834306301794583852, 233840261972944466), - (9778096226693756759, 187072209578355573), - (15201174610838826053, 149657767662684458), - (2185786488890659746, 239452428260295134), - (5437978005854438120, 191561942608236107), - (15418428848909281466, 153249554086588885), - (6222742084545298729, 245199286538542217), - (16046240111861969953, 196159429230833773), - (1768945645263844993, 156927543384667019), - (10209010661905972635, 251084069415467230), - (8167208529524778108, 200867255532373784), - (10223115638361732810, 160693804425899027), - (1599589762411131202, 257110087081438444), - (4969020624670815285, 205688069665150755), - (3975216499736652228, 164550455732120604), - (13739044029062464211, 263280729171392966), - (7301886408508061046, 210624583337114373), - (13220206756290269483, 168499666669691498), - (17462981995322520850, 269599466671506397), - (6591687966774196033, 215679573337205118), - (12652048002903177473, 172543658669764094), - (9175230360419352987, 276069853871622551), - (3650835473593572067, 220855883097298041), - (17678063637842498946, 176684706477838432), - (13527506561580357021, 282695530364541492), - (3443307619780464970, 226156424291633194), - (6443994910566282300, 180925139433306555), - (5155195928453025840, 144740111546645244), - (15627011115008661990, 231584178474632390), - (12501608892006929592, 185267342779705912), - 
(2622589484121723027, 148213874223764730), - (4196143174594756843, 237142198758023568), - (10735612169159626121, 189713759006418854), - (12277838550069611220, 151771007205135083), - (15955192865369467629, 242833611528216133), - (1696107848069843133, 194266889222572907), - (12424932722681605476, 155413511378058325), - (1433148282581017146, 248661618204893321), - (15903913885032455010, 198929294563914656), - (9033782293284053685, 159143435651131725), - (14454051669254485895, 254629497041810760), - (11563241335403588716, 203703597633448608), - (16629290697806691620, 162962878106758886), - (781423413297334329, 260740604970814219), - (4314487545379777786, 208592483976651375), - (3451590036303822229, 166873987181321100), - (5522544058086115566, 266998379490113760), - (4418035246468892453, 213598703592091008), - (10913125826658934609, 170878962873672806), - (10082303693170474728, 273406340597876490), - (8065842954536379782, 218725072478301192), - (17520720807854834795, 174980057982640953), - (5897060404116273733, 279968092772225526), - (1028299508551108663, 223974474217780421), - (15580034865808528224, 179179579374224336), - (17549358155809824511, 286687326998758938), - (2971440080422128639, 229349861599007151), - (17134547323305344204, 183479889279205720), - (13707637858644275364, 146783911423364576), - (14553522944347019935, 234854258277383322), - (4264120725993795302, 187883406621906658), - (10789994210278856888, 150306725297525326), - (9885293106962350374, 240490760476040522), - (529536856086059653, 192392608380832418), - (7802327114352668369, 153914086704665934), - (1415676938738538420, 246262538727465495), - (1132541550990830736, 197010030981972396), - (15663428499760305882, 157608024785577916), - (17682787970132668764, 252172839656924666), - (10456881561364224688, 201738271725539733), - (15744202878575200397, 161390617380431786), - (17812026976236499989, 258224987808690858), - (3181575136763469022, 206579990246952687), - (13613306553636506187, 165263992197562149), - (10713244041592678929, 264422387516099439), - (12259944048016053467, 211537910012879551), - (6118606423670932450, 169230328010303641), - (2411072648389671274, 270768524816485826), - (16686253377679378312, 216614819853188660), - (13349002702143502650, 173291855882550928), - (17669055508687693916, 277266969412081485), - (14135244406950155133, 221813575529665188), - (240149081334393137, 177450860423732151), - (11452284974360759988, 283921376677971441), - (5472479164746697667, 227137101342377153), - (11756680961281178780, 181709681073901722), - (2026647139541122378, 145367744859121378), - (18000030682233437097, 232588391774594204), - (18089373360528660001, 186070713419675363), - (3403452244197197031, 148856570735740291), - (16513570034941246220, 238170513177184465), - (13210856027952996976, 190536410541747572), - (3189987192878576934, 152429128433398058), - (1414630693863812771, 243886605493436893), - (8510402184574870864, 195109284394749514), - (10497670562401807014, 156087427515799611), - (9417575270359070576, 249739884025279378), - (14912757845771077107, 199791907220223502), - (4551508647133041040, 159833525776178802), - (10971762650154775986, 255733641241886083), - (16156107749607641435, 204586912993508866), - (9235537384944202825, 163669530394807093), - (11087511001168814197, 261871248631691349), - (12559357615676961681, 209496998905353079), - (13736834907283479668, 167597599124282463), - (18289587036911657145, 268156158598851941), - (10942320814787415393, 214524926879081553), - (16132554281313752961, 171619941503265242), - 
(11054691591134363444, 274591906405224388), - (16222450902391311402, 219673525124179510), - (12977960721913049122, 175738820099343608), - (17075388340318968271, 281182112158949773), - (2592264228029443648, 224945689727159819), - (5763160197165465241, 179956551781727855), - (9221056315464744386, 287930482850764568), - (14755542681855616155, 230344386280611654), - (15493782960226403247, 184275509024489323), - (1326979923955391628, 147420407219591459), - (9501865507812447252, 235872651551346334), - (11290841220991868125, 188698121241077067), - (1653975347309673853, 150958496992861654), - (10025058185179298811, 241533595188578646), - (4330697733401528726, 193226876150862917), - (14532604630946953951, 154581500920690333), - (1116074521063664381, 247330401473104534), - (4582208431592841828, 197864321178483627), - (14733813189500004432, 158291456942786901), - (16195403473716186445, 253266331108459042), - (5577625149489128510, 202613064886767234), - (8151448934333213131, 162090451909413787), - (16731667109675051333, 259344723055062059), - (17074682502481951390, 207475778444049647), - (6281048372501740465, 165980622755239718), - (6360328581260874421, 265568996408383549), - (8777611679750609860, 212455197126706839), - (10711438158542398211, 169964157701365471), - (9759603424184016492, 271942652322184754), - (11497031554089123517, 217554121857747803), - (16576322872755119460, 174043297486198242), - (11764721337440549842, 278469275977917188), - (16790474699436260520, 222775420782333750), - (13432379759549008416, 178220336625867000), - (3045063541568861850, 285152538601387201), - (17193446092222730773, 228122030881109760), - (13754756873778184618, 182497624704887808), - (18382503128506368341, 145998099763910246), - (3586563302416817083, 233596959622256395), - (2869250641933453667, 186877567697805116), - (17052795772514404226, 149502054158244092), - (12527077977055405469, 239203286653190548), - (17400360011128145022, 191362629322552438), - (2852241564676785048, 153090103458041951), - (15631632947708587046, 244944165532867121), - (8815957543424959314, 195955332426293697), - (18120812478965698421, 156764265941034957), - (14235904707377476180, 250822825505655932), - (4010026136418160298, 200658260404524746), - (17965416168102169531, 160526608323619796), - (2919224165770098987, 256842573317791675), - (2335379332616079190, 205474058654233340), - (1868303466092863352, 164379246923386672), - (6678634360490491686, 263006795077418675), - (5342907488392393349, 210405436061934940), - (4274325990713914679, 168324348849547952), - (10528270399884173809, 269318958159276723), - (15801313949391159694, 215455166527421378), - (1573004715287196786, 172364133221937103), - (17274202803427156150, 275782613155099364), - (17508711057483635243, 220626090524079491), - (10317620031244997871, 176500872419263593), - (12818843235250086271, 282401395870821749), - (13944423402941979340, 225921116696657399), - (14844887537095493795, 180736893357325919), - (15565258844418305359, 144589514685860735), - (6457670077359736959, 231343223497377177), - (16234182506113520537, 185074578797901741), - (9297997190148906106, 148059663038321393), - (11187446689496339446, 236895460861314229), - (12639306166338981880, 189516368689051383), - (17490142562555006151, 151613094951241106), - (2158786396894637579, 242580951921985771), - (16484424376483351356, 194064761537588616), - (9498190686444770762, 155251809230070893), - (11507756283569722895, 248402894768113429), - (12895553841597688639, 198722315814490743), - (17695140702761971558, 158977852651592594), 
- (17244178680193423523, 254364564242548151), - (10105994129412828495, 203491651394038521), - (4395446488788352473, 162793321115230817), - (10722063196803274280, 260469313784369307), - (1198952927958798777, 208375451027495446), - (15716557601334680315, 166700360821996356), - (17767794532651667857, 266720577315194170), - (14214235626121334286, 213376461852155336), - (7682039686155157106, 170701169481724269), - (1223217053622520399, 273121871170758831), - (15735968901865657612, 218497496936607064), - (16278123936234436413, 174797997549285651), - (219556594781725998, 279676796078857043), - (7554342905309201445, 223741436863085634), - (9732823138989271479, 178993149490468507), - (815121763415193074, 286389039184749612), - (11720143854957885429, 229111231347799689), - (13065463898708218666, 183288985078239751), - (6763022304224664610, 146631188062591801), - (3442138057275642729, 234609900900146882), - (13821756890046245153, 187687920720117505), - (11057405512036996122, 150150336576094004), - (6623802375033462826, 240240538521750407), - (16367088344252501231, 192192430817400325), - (13093670675402000985, 153753944653920260), - (2503129006933649959, 246006311446272417), - (13070549649772650937, 196805049157017933), - (17835137349301941396, 157444039325614346), - (2710778055689733971, 251910462920982955), - (2168622444551787177, 201528370336786364), - (5424246770383340065, 161222696269429091), - (1300097203129523457, 257956314031086546), - (15797473021471260058, 206365051224869236), - (8948629602435097724, 165092040979895389), - (3249760919670425388, 264147265567832623), - (9978506365220160957, 211317812454266098), - (15361502721659949412, 169054249963412878), - (2442311466204457120, 270486799941460606), - (16711244431931206989, 216389439953168484), - (17058344360286875914, 173111551962534787), - (12535955717491360170, 276978483140055660), - (10028764573993088136, 221582786512044528), - (15401709288678291155, 177266229209635622), - (9885339602917624555, 283625966735416996), - (4218922867592189321, 226900773388333597), - (14443184738299482427, 181520618710666877), - (4175850161155765295, 145216494968533502), - (10370709072591134795, 232346391949653603), - (15675264887556728482, 185877113559722882), - (5161514280561562140, 148701690847778306), - (879725219414678777, 237922705356445290), - (703780175531743021, 190338164285156232), - (11631070584651125387, 152270531428124985), - (162968861732249003, 243632850284999977), - (11198421533611530172, 194906280227999981), - (5269388412147313814, 155925024182399985), - (8431021459435702103, 249480038691839976), - (3055468352806651359, 199584030953471981), - (17201769941212962380, 159667224762777584), - (16454785461715008838, 255467559620444135), - (13163828369372007071, 204374047696355308), - (17909760324981426303, 163499238157084246), - (2830174816776909822, 261598781051334795), - (2264139853421527858, 209279024841067836), - (16568707141704863579, 167423219872854268), - (4373838538276319787, 267877151796566830), - (3499070830621055830, 214301721437253464), - (6488605479238754987, 171441377149802771), - (3003071137298187333, 274306203439684434), - (6091805724580460189, 219444962751747547), - (15941491023890099121, 175555970201398037), - (10748990379256517301, 280889552322236860), - (8599192303405213841, 224711641857789488), - (14258051472207991719, 179769313486231590), +const DOUBLE_POW5_INV_TABLE_SIZE: usize = 342; +const DOUBLE_POW5_TABLE_SIZE: usize = 326; + +pub static DOUBLE_POW5_INV_SPLIT: [(u64, u64); DOUBLE_POW5_INV_TABLE_SIZE] = [ + (1, 
2305843009213693952), + (11068046444225730970, 1844674407370955161), + (5165088340638674453, 1475739525896764129), + (7821419487252849886, 1180591620717411303), + (8824922364862649494, 1888946593147858085), + (7059937891890119595, 1511157274518286468), + (13026647942995916322, 1208925819614629174), + (9774590264567735146, 1934281311383406679), + (11509021026396098440, 1547425049106725343), + (16585914450600699399, 1237940039285380274), + (15469416676735388068, 1980704062856608439), + (16064882156130220778, 1584563250285286751), + (9162556910162266299, 1267650600228229401), + (7281393426775805432, 2028240960365167042), + (16893161185646375315, 1622592768292133633), + (2446482504291369283, 1298074214633706907), + (7603720821608101175, 2076918743413931051), + (2393627842544570617, 1661534994731144841), + (16672297533003297786, 1329227995784915872), + (11918280793837635165, 2126764793255865396), + (5845275820328197809, 1701411834604692317), + (15744267100488289217, 1361129467683753853), + (3054734472329800808, 2177807148294006166), + (17201182836831481939, 1742245718635204932), + (6382248639981364905, 1393796574908163946), + (2832900194486363201, 2230074519853062314), + (5955668970331000884, 1784059615882449851), + (1075186361522890384, 1427247692705959881), + (12788344622662355584, 2283596308329535809), + (13920024512871794791, 1826877046663628647), + (3757321980813615186, 1461501637330902918), + (10384555214134712795, 1169201309864722334), + (5547241898389809503, 1870722095783555735), + (4437793518711847602, 1496577676626844588), + (10928932444453298728, 1197262141301475670), + (17486291911125277965, 1915619426082361072), + (6610335899416401726, 1532495540865888858), + (12666966349016942027, 1225996432692711086), + (12888448528943286597, 1961594292308337738), + (17689456452638449924, 1569275433846670190), + (14151565162110759939, 1255420347077336152), + (7885109000409574610, 2008672555323737844), + (9997436015069570011, 1606938044258990275), + (7997948812055656009, 1285550435407192220), + (12796718099289049614, 2056880696651507552), + (2858676849947419045, 1645504557321206042), + (13354987924183666206, 1316403645856964833), + (17678631863951955605, 2106245833371143733), + (3074859046935833515, 1684996666696914987), + (13527933681774397782, 1347997333357531989), + (10576647446613305481, 2156795733372051183), + (15840015586774465031, 1725436586697640946), + (8982663654677661702, 1380349269358112757), + (18061610662226169046, 2208558830972980411), + (10759939715039024913, 1766847064778384329), + (12297300586773130254, 1413477651822707463), + (15986332124095098083, 2261564242916331941), + (9099716884534168143, 1809251394333065553), + (14658471137111155161, 1447401115466452442), + (4348079280205103483, 1157920892373161954), + (14335624477811986218, 1852673427797059126), + (7779150767507678651, 1482138742237647301), + (2533971799264232598, 1185710993790117841), + (15122401323048503126, 1897137590064188545), + (12097921058438802501, 1517710072051350836), + (5988988032009131678, 1214168057641080669), + (16961078480698431330, 1942668892225729070), + (13568862784558745064, 1554135113780583256), + (7165741412905085728, 1243308091024466605), + (11465186260648137165, 1989292945639146568), + (16550846638002330379, 1591434356511317254), + (16930026125143774626, 1273147485209053803), + (4951948911778577463, 2037035976334486086), + (272210314680951647, 1629628781067588869), + (3907117066486671641, 1303703024854071095), + (6251387306378674625, 2085924839766513752), + (16069156289328670670, 
1668739871813211001), + (9165976216721026213, 1334991897450568801), + (7286864317269821294, 2135987035920910082), + (16897537898041588005, 1708789628736728065), + (13518030318433270404, 1367031702989382452), + (6871453250525591353, 2187250724783011924), + (9186511415162383406, 1749800579826409539), + (11038557946871817048, 1399840463861127631), + (10282995085511086630, 2239744742177804210), + (8226396068408869304, 1791795793742243368), + (13959814484210916090, 1433436634993794694), + (11267656730511734774, 2293498615990071511), + (5324776569667477496, 1834798892792057209), + (7949170070475892320, 1467839114233645767), + (17427382500606444826, 1174271291386916613), + (5747719112518849781, 1878834066219066582), + (15666221734240810795, 1503067252975253265), + (12532977387392648636, 1202453802380202612), + (5295368560860596524, 1923926083808324180), + (4236294848688477220, 1539140867046659344), + (7078384693692692099, 1231312693637327475), + (11325415509908307358, 1970100309819723960), + (9060332407926645887, 1576080247855779168), + (14626963555825137356, 1260864198284623334), + (12335095245094488799, 2017382717255397335), + (9868076196075591040, 1613906173804317868), + (15273158586344293478, 1291124939043454294), + (13369007293925138595, 2065799902469526871), + (7005857020398200553, 1652639921975621497), + (16672732060544291412, 1322111937580497197), + (11918976037903224966, 2115379100128795516), + (5845832015580669650, 1692303280103036413), + (12055363241948356366, 1353842624082429130), + (841837113407818570, 2166148198531886609), + (4362818505468165179, 1732918558825509287), + (14558301248600263113, 1386334847060407429), + (12225235553534690011, 2218135755296651887), + (2401490813343931363, 1774508604237321510), + (1921192650675145090, 1419606883389857208), + (17831303500047873437, 2271371013423771532), + (6886345170554478103, 1817096810739017226), + (1819727321701672159, 1453677448591213781), + (16213177116328979020, 1162941958872971024), + (14873036941900635463, 1860707134196753639), + (15587778368262418694, 1488565707357402911), + (8780873879868024632, 1190852565885922329), + (2981351763563108441, 1905364105417475727), + (13453127855076217722, 1524291284333980581), + (7073153469319063855, 1219433027467184465), + (11317045550910502167, 1951092843947495144), + (12742985255470312057, 1560874275157996115), + (10194388204376249646, 1248699420126396892), + (1553625868034358140, 1997919072202235028), + (8621598323911307159, 1598335257761788022), + (17965325103354776697, 1278668206209430417), + (13987124906400001422, 2045869129935088668), + (121653480894270168, 1636695303948070935), + (97322784715416134, 1309356243158456748), + (14913111714512307107, 2094969989053530796), + (8241140556867935363, 1675975991242824637), + (17660958889720079260, 1340780792994259709), + (17189487779326395846, 2145249268790815535), + (13751590223461116677, 1716199415032652428), + (18379969808252713988, 1372959532026121942), + (14650556434236701088, 2196735251241795108), + (652398703163629901, 1757388200993436087), + (11589965406756634890, 1405910560794748869), + (7475898206584884855, 2249456897271598191), + (2291369750525997561, 1799565517817278553), + (9211793429904618695, 1439652414253822842), + (18428218302589300235, 2303443862806116547), + (7363877012587619542, 1842755090244893238), + (13269799239553916280, 1474204072195914590), + (10615839391643133024, 1179363257756731672), + (2227947767661371545, 1886981212410770676), + (16539753473096738529, 1509584969928616540), + (13231802778477390823, 1207667975942893232), 
+ (6413489186596184024, 1932268761508629172), + (16198837793502678189, 1545815009206903337), + (5580372605318321905, 1236652007365522670), + (8928596168509315048, 1978643211784836272), + (18210923379033183008, 1582914569427869017), + (7190041073742725760, 1266331655542295214), + (436019273762630246, 2026130648867672343), + (7727513048493924843, 1620904519094137874), + (9871359253537050198, 1296723615275310299), + (4726128361433549347, 2074757784440496479), + (7470251503888749801, 1659806227552397183), + (13354898832594820487, 1327844982041917746), + (13989140502667892133, 2124551971267068394), + (14880661216876224029, 1699641577013654715), + (11904528973500979224, 1359713261610923772), + (4289851098633925465, 2175541218577478036), + (18189276137874781665, 1740432974861982428), + (3483374466074094362, 1392346379889585943), + (1884050330976640656, 2227754207823337509), + (5196589079523222848, 1782203366258670007), + (15225317707844309248, 1425762693006936005), + (5913764258841343181, 2281220308811097609), + (8420360221814984868, 1824976247048878087), + (17804334621677718864, 1459980997639102469), + (17932816512084085415, 1167984798111281975), + (10245762345624985047, 1868775676978051161), + (4507261061758077715, 1495020541582440929), + (7295157664148372495, 1196016433265952743), + (7982903447895485668, 1913626293225524389), + (10075671573058298858, 1530901034580419511), + (4371188443704728763, 1224720827664335609), + (14372599139411386667, 1959553324262936974), + (15187428126271019657, 1567642659410349579), + (15839291315758726049, 1254114127528279663), + (3206773216762499739, 2006582604045247462), + (13633465017635730761, 1605266083236197969), + (14596120828850494932, 1284212866588958375), + (4907049252451240275, 2054740586542333401), + (236290587219081897, 1643792469233866721), + (14946427728742906810, 1315033975387093376), + (16535586736504830250, 2104054360619349402), + (5849771759720043554, 1683243488495479522), + (15747863852001765813, 1346594790796383617), + (10439186904235184007, 2154551665274213788), + (15730047152871967852, 1723641332219371030), + (12584037722297574282, 1378913065775496824), + (9066413911450387881, 2206260905240794919), + (10942479943902220628, 1765008724192635935), + (8753983955121776503, 1412006979354108748), + (10317025513452932081, 2259211166966573997), + (874922781278525018, 1807368933573259198), + (8078635854506640661, 1445895146858607358), + (13841606313089133175, 1156716117486885886), + (14767872471458792434, 1850745787979017418), + (746251532941302978, 1480596630383213935), + (597001226353042382, 1184477304306571148), + (15712597221132509104, 1895163686890513836), + (8880728962164096960, 1516130949512411069), + (10793931984473187891, 1212904759609928855), + (17270291175157100626, 1940647615375886168), + (2748186495899949531, 1552518092300708935), + (2198549196719959625, 1242014473840567148), + (18275073973719576693, 1987223158144907436), + (10930710364233751031, 1589778526515925949), + (12433917106128911148, 1271822821212740759), + (8826220925580526867, 2034916513940385215), + (7060976740464421494, 1627933211152308172), + (16716827836597268165, 1302346568921846537), + (11989529279587987770, 2083754510274954460), + (9591623423670390216, 1667003608219963568), + (15051996368420132820, 1333602886575970854), + (13015147745246481542, 2133764618521553367), + (3033420566713364587, 1707011694817242694), + (6116085268112601993, 1365609355853794155), + (9785736428980163188, 2184974969366070648), + (15207286772667951197, 1747979975492856518), + (1097782973908629988, 
1398383980394285215), + (1756452758253807981, 2237414368630856344), + (5094511021344956708, 1789931494904685075), + (4075608817075965366, 1431945195923748060), + (6520974107321544586, 2291112313477996896), + (1527430471115325346, 1832889850782397517), + (12289990821117991246, 1466311880625918013), + (17210690286378213644, 1173049504500734410), + (9090360384495590213, 1876879207201175057), + (18340334751822203140, 1501503365760940045), + (14672267801457762512, 1201202692608752036), + (16096930852848599373, 1921924308174003258), + (1809498238053148529, 1537539446539202607), + (12515645034668249793, 1230031557231362085), + (1578287981759648052, 1968050491570179337), + (12330676829633449412, 1574440393256143469), + (13553890278448669853, 1259552314604914775), + (3239480371808320148, 2015283703367863641), + (17348979556414297411, 1612226962694290912), + (6500486015647617283, 1289781570155432730), + (10400777625036187652, 2063650512248692368), + (15699319729512770768, 1650920409798953894), + (16248804598352126938, 1320736327839163115), + (7551343283653851484, 2113178124542660985), + (6041074626923081187, 1690542499634128788), + (12211557331022285596, 1352433999707303030), + (1091747655926105338, 2163894399531684849), + (4562746939482794594, 1731115519625347879), + (7339546366328145998, 1384892415700278303), + (8053925371383123274, 2215827865120445285), + (6443140297106498619, 1772662292096356228), + (12533209867169019542, 1418129833677084982), + (5295740528502789974, 2269007733883335972), + (15304638867027962949, 1815206187106668777), + (4865013464138549713, 1452164949685335022), + (14960057215536570740, 1161731959748268017), + (9178696285890871890, 1858771135597228828), + (14721654658196518159, 1487016908477783062), + (4398626097073393881, 1189613526782226450), + (7037801755317430209, 1903381642851562320), + (5630241404253944167, 1522705314281249856), + (814844308661245011, 1218164251424999885), + (1303750893857992017, 1949062802279999816), + (15800395974054034906, 1559250241823999852), + (5261619149759407279, 1247400193459199882), + (12107939454356961969, 1995840309534719811), + (5997002748743659252, 1596672247627775849), + (8486951013736837725, 1277337798102220679), + (2511075177753209390, 2043740476963553087), + (13076906586428298482, 1634992381570842469), + (14150874083884549109, 1307993905256673975), + (4194654460505726958, 2092790248410678361), + (18113118827372222859, 1674232198728542688), + (3422448617672047318, 1339385758982834151), + (16543964232501006678, 2143017214372534641), + (9545822571258895019, 1714413771498027713), + (15015355686490936662, 1371531017198422170), + (5577825024675947042, 2194449627517475473), + (11840957649224578280, 1755559702013980378), + (16851463748863483271, 1404447761611184302), + (12204946739213931940, 2247116418577894884), + (13453306206113055875, 1797693134862315907), + (3383947335406624054, 1438154507889852726), + (16482362180876329456, 2301047212623764361), + (9496540929959153242, 1840837770099011489), + (11286581558709232917, 1472670216079209191), + (5339916432225476010, 1178136172863367353), + (4854517476818851293, 1885017876581387765), + (3883613981455081034, 1508014301265110212), + (14174937629389795797, 1206411441012088169), + (11611853762797942306, 1930258305619341071), + (5600134195496443521, 1544206644495472857), + (15548153800622885787, 1235365315596378285), + (6430302007287065643, 1976584504954205257), + (16212288050055383484, 1581267603963364205), + (12969830440044306787, 1265014083170691364), + (9683682259845159889, 2024022533073106183), + 
(15125643437359948558, 1619218026458484946), + (8411165935146048523, 1295374421166787957), + (17147214310975587960, 2072599073866860731), + (10028422634038560045, 1658079259093488585), + (8022738107230848036, 1326463407274790868), + (9147032156827446534, 2122341451639665389), + (11006974540203867551, 1697873161311732311), + (5116230817421183718, 1358298529049385849), + (15564666937357714594, 2173277646479017358), + (1383687105660440706, 1738622117183213887), + (12174996128754083534, 1390897693746571109), + (8411947361780802685, 2225436309994513775), + (6729557889424642148, 1780349047995611020), + (5383646311539713719, 1424279238396488816), + (1235136468979721303, 2278846781434382106), + (15745504434151418335, 1823077425147505684), + (16285752362063044992, 1458461940118004547), + (5649904260166615347, 1166769552094403638), + (5350498001524674232, 1866831283351045821), + (591049586477829062, 1493465026680836657), + (11540886113407994219, 1194772021344669325), + (18673707743239135, 1911635234151470921), + (14772334225162232601, 1529308187321176736), + (8128518565387875758, 1223446549856941389), + (1937583260394870242, 1957514479771106223), + (8928764237799716840, 1566011583816884978), + (14521709019723594119, 1252809267053507982), + (8477339172590109297, 2004494827285612772), + (17849917782297818407, 1603595861828490217), + (6901236596354434079, 1282876689462792174), + (18420676183650915173, 2052602703140467478), + (3668494502695001169, 1642082162512373983), + (10313493231639821582, 1313665730009899186), + (9122891541139893884, 2101865168015838698), + (14677010862395735754, 1681492134412670958), + (673562245690857633, 1345193707530136767), ]; -pub static DOUBLE_POW5_SPLIT: [(u64, u64); 326] = [ - (0, 72057594037927936), - (0, 90071992547409920), - (0, 112589990684262400), - (0, 140737488355328000), - (0, 87960930222080000), - (0, 109951162777600000), - (0, 137438953472000000), - (0, 85899345920000000), - (0, 107374182400000000), - (0, 134217728000000000), - (0, 83886080000000000), - (0, 104857600000000000), - (0, 131072000000000000), - (0, 81920000000000000), - (0, 102400000000000000), - (0, 128000000000000000), - (0, 80000000000000000), - (0, 100000000000000000), - (0, 125000000000000000), - (0, 78125000000000000), - (0, 97656250000000000), - (0, 122070312500000000), - (0, 76293945312500000), - (0, 95367431640625000), - (0, 119209289550781250), - (4611686018427387904, 74505805969238281), - (10376293541461622784, 93132257461547851), - (8358680908399640576, 116415321826934814), - (612489549322387456, 72759576141834259), - (14600669991935148032, 90949470177292823), - (13639151471491547136, 113686837721616029), - (3213881284082270208, 142108547152020037), - (4314518811765112832, 88817841970012523), - (781462496279003136, 111022302462515654), - (10200200157203529728, 138777878078144567), - (13292654125893287936, 86736173798840354), - (7392445620511834112, 108420217248550443), - (4628871007212404736, 135525271560688054), - (16728102434789916672, 84703294725430033), - (7075069988205232128, 105879118406787542), - (18067209522111315968, 132348898008484427), - (8986162942105878528, 82718061255302767), - (6621017659204960256, 103397576569128459), - (3664586055578812416, 129246970711410574), - (16125424340018921472, 80779356694631608), - (1710036351314100224, 100974195868289511), - (15972603494424788992, 126217744835361888), - (9982877184015493120, 78886090522101180), - (12478596480019366400, 98607613152626475), - (10986559581596820096, 123259516440783094), - (2254913720070624656, 77037197775489434), - 
(12042014186943056628, 96296497219361792), - (15052517733678820785, 120370621524202240), - (9407823583549262990, 75231638452626400), - (11759779479436578738, 94039548065783000), - (14699724349295723422, 117549435082228750), - (4575641699882439235, 73468396926392969), - (10331238143280436948, 91835496157991211), - (8302361660673158281, 114794370197489014), - (1154580038986672043, 143492962746861268), - (9944984561221445835, 89683101716788292), - (12431230701526807293, 112103877145985365), - (1703980321626345405, 140129846432481707), - (17205888765512323542, 87581154020301066), - (12283988920035628619, 109476442525376333), - (1519928094762372062, 136845553156720417), - (12479170105294952299, 85528470722950260), - (15598962631618690374, 106910588403687825), - (5663645234241199255, 133638235504609782), - (17374836326682913246, 83523897190381113), - (7883487353071477846, 104404871487976392), - (9854359191339347308, 130506089359970490), - (10770660513014479971, 81566305849981556), - (13463325641268099964, 101957882312476945), - (2994098996302961243, 127447352890596182), - (15706369927971514489, 79654595556622613), - (5797904354682229399, 99568244445778267), - (2635694424925398845, 124460305557222834), - (6258995034005762182, 77787690973264271), - (3212057774079814824, 97234613716580339), - (17850130272881932242, 121543267145725423), - (18073860448192289507, 75964541966078389), - (8757267504958198172, 94955677457597987), - (6334898362770359811, 118694596821997484), - (13182683513586250689, 74184123013748427), - (11866668373555425458, 92730153767185534), - (5609963430089506015, 115912692208981918), - (17341285199088104971, 72445432630613698), - (12453234462005355406, 90556790788267123), - (10954857059079306353, 113195988485333904), - (13693571323849132942, 141494985606667380), - (17781854114260483896, 88434366004167112), - (3780573569116053255, 110542957505208891), - (114030942967678664, 138178696881511114), - (4682955357782187069, 86361685550944446), - (15077066234082509644, 107952106938680557), - (5011274737320973344, 134940133673350697), - (14661261756894078100, 84337583545844185), - (4491519140835433913, 105421979432305232), - (5614398926044292391, 131777474290381540), - (12732371365632458552, 82360921431488462), - (6692092170185797382, 102951151789360578), - (17588487249587022536, 128688939736700722), - (15604490549419276989, 80430587335437951), - (14893927168346708332, 100538234169297439), - (14005722942005997511, 125672792711621799), - (15671105866394830300, 78545495444763624), - (1142138259283986260, 98181869305954531), - (15262730879387146537, 122727336632443163), - (7233363790403272633, 76704585395276977), - (13653390756431478696, 95880731744096221), - (3231680390257184658, 119850914680120277), - (4325643253124434363, 74906821675075173), - (10018740084832930858, 93633527093843966), - (3300053069186387764, 117041908867304958), - (15897591223523656064, 73151193042065598), - (10648616992549794273, 91438991302581998), - (4087399203832467033, 114298739128227498), - (14332621041645359599, 142873423910284372), - (18181260187883125557, 89295889943927732), - (4279831161144355331, 111619862429909666), - (14573160988285219972, 139524828037387082), - (13719911636105650386, 87203017523366926), - (7926517508277287175, 109003771904208658), - (684774848491833161, 136254714880260823), - (7345513307948477581, 85159196800163014), - (18405263671790372785, 106448996000203767), - (18394893571310578077, 133061245000254709), - (13802651491282805250, 83163278125159193), - (3418256308821342851, 
103954097656448992), - (4272820386026678563, 129942622070561240), - (2670512741266674102, 81214138794100775), - (17173198981865506339, 101517673492625968), - (3019754653622331308, 126897091865782461), - (4193189667727651020, 79310682416114038), - (14464859121514339583, 99138353020142547), - (13469387883465536574, 123922941275178184), - (8418367427165960359, 77451838296986365), - (15134645302384838353, 96814797871232956), - (471562554271496325, 121018497339041196), - (9518098633274461011, 75636560836900747), - (7285937273165688360, 94545701046125934), - (18330793628311886258, 118182126307657417), - (4539216990053847055, 73863828942285886), - (14897393274422084627, 92329786177857357), - (4786683537745442072, 115412232722321697), - (14520892257159371055, 72132645451451060), - (18151115321449213818, 90165806814313825), - (8853836096529353561, 112707258517892282), - (1843923083806916143, 140884073147365353), - (12681666973447792349, 88052545717103345), - (2017025661527576725, 110065682146379182), - (11744654113764246714, 137582102682973977), - (422879793461572340, 85988814176858736), - (528599741826965425, 107486017721073420), - (660749677283706782, 134357522151341775), - (7330497575943398595, 83973451344588609), - (13774807988356636147, 104966814180735761), - (3383451930163631472, 131208517725919702), - (15949715511634433382, 82005323578699813), - (6102086334260878016, 102506654473374767), - (3015921899398709616, 128133318091718459), - (18025852251620051174, 80083323807324036), - (4085571240815512351, 100104154759155046), - (14330336087874166247, 125130193448943807), - (15873989082562435760, 78206370905589879), - (15230800334775656796, 97757963631987349), - (5203442363187407284, 122197454539984187), - (946308467778435600, 76373409087490117), - (5794571603150432404, 95466761359362646), - (16466586540792816313, 119333451699203307), - (7985773578781816244, 74583407312002067), - (5370530955049882401, 93229259140002584), - (6713163693812353001, 116536573925003230), - (18030785363914884337, 72835358703127018), - (13315109668038829614, 91044198378908773), - (2808829029766373305, 113805247973635967), - (17346094342490130344, 142256559967044958), - (6229622945628943561, 88910349979403099), - (3175342663608791547, 111137937474253874), - (13192550366365765242, 138922421842817342), - (3633657960551215372, 86826513651760839), - (18377130505971182927, 108533142064701048), - (4524669058754427043, 135666427580876311), - (9745447189362598758, 84791517238047694), - (2958436949848472639, 105989396547559618), - (12921418224165366607, 132486745684449522), - (12687572408530742033, 82804216052780951), - (11247779492236039638, 103505270065976189), - (224666310012885835, 129381587582470237), - (2446259452971747599, 80863492239043898), - (12281196353069460307, 101079365298804872), - (15351495441336825384, 126349206623506090), - (14206370669262903769, 78968254139691306), - (8534591299723853903, 98710317674614133), - (15279925143082205283, 123387897093267666), - (14161639232853766206, 77117435683292291), - (13090363022639819853, 96396794604115364), - (16362953778299774816, 120495993255144205), - (12532689120651053212, 75309995784465128), - (15665861400813816515, 94137494730581410), - (10358954714162494836, 117671868413226763), - (4168503687137865320, 73544917758266727), - (598943590494943747, 91931147197833409), - (5360365506546067587, 114913933997291761), - (11312142901609972388, 143642417496614701), - (9375932322719926695, 89776510935384188), - (11719915403399908368, 112220638669230235), - (10038208235822497557, 
140275798336537794), - (10885566165816448877, 87672373960336121), - (18218643725697949000, 109590467450420151), - (18161618638695048346, 136988084313025189), - (13656854658398099168, 85617552695640743), - (12459382304570236056, 107021940869550929), - (1739169825430631358, 133777426086938662), - (14922039196176308311, 83610891304336663), - (14040862976792997485, 104513614130420829), - (3716020665709083144, 130642017663026037), - (4628355925281870917, 81651261039391273), - (10397130925029726550, 102064076299239091), - (8384727637859770284, 127580095374048864), - (5240454773662356427, 79737559608780540), - (6550568467077945534, 99671949510975675), - (3576524565420044014, 124589936888719594), - (6847013871814915412, 77868710555449746), - (17782139376623420074, 97335888194312182), - (13004302183924499284, 121669860242890228), - (17351060901807587860, 76043662651806392), - (3242082053549933210, 95054578314757991), - (17887660622219580224, 118818222893447488), - (11179787888887237640, 74261389308404680), - (13974734861109047050, 92826736635505850), - (8245046539531533005, 116033420794382313), - (16682369133275677888, 72520887996488945), - (7017903361312433648, 90651109995611182), - (17995751238495317868, 113313887494513977), - (8659630992836983623, 141642359368142472), - (5412269370523114764, 88526474605089045), - (11377022731581281359, 110658093256361306), - (4997906377621825891, 138322616570451633), - (14652906532082110942, 86451635356532270), - (9092761128247862869, 108064544195665338), - (2142579373455052779, 135080680244581673), - (12868327154477877747, 84425425152863545), - (2250350887815183471, 105531781441079432), - (2812938609768979339, 131914726801349290), - (6369772649532999991, 82446704250843306), - (17185587848771025797, 103058380313554132), - (3035240737254230630, 128822975391942666), - (6508711479211282048, 80514359619964166), - (17359261385868878368, 100642949524955207), - (17087390713908710056, 125803686906194009), - (3762090168551861929, 78627304316371256), - (4702612710689827411, 98284130395464070), - (15101637925217060072, 122855162994330087), - (16356052730901744401, 76784476871456304), - (1998321839917628885, 95980596089320381), - (7109588318324424010, 119975745111650476), - (13666864735807540814, 74984840694781547), - (12471894901332038114, 93731050868476934), - (6366496589810271835, 117163813585596168), - (3979060368631419896, 73227383490997605), - (9585511479216662775, 91534229363747006), - (2758517312166052660, 114417786704683758), - (12671518677062341634, 143022233380854697), - (1002170145522881665, 89388895863034186), - (10476084718758377889, 111736119828792732), - (13095105898447972362, 139670149785990915), - (5878598177316288774, 87293843616244322), - (16571619758500136775, 109117304520305402), - (11491152661270395161, 136396630650381753), - (264441385652915120, 85247894156488596), - (330551732066143900, 106559867695610745), - (5024875683510067779, 133199834619513431), - (10058076329834874218, 83249896637195894), - (3349223375438816964, 104062370796494868), - (4186529219298521205, 130077963495618585), - (14145795808130045513, 81298727184761615), - (13070558741735168987, 101623408980952019), - (11726512408741573330, 127029261226190024), - (7329070255463483331, 79393288266368765), - (13773023837756742068, 99241610332960956), - (17216279797195927585, 124052012916201195), - (8454331864033760789, 77532508072625747), - (5956228811614813082, 96915635090782184), - (7445286014518516353, 121144543863477730), - (9264989777501460624, 75715339914673581), - (16192923240304213684, 
94644174893341976), - (1794409976670715490, 118305218616677471), - (8039035263060279037, 73940761635423419), - (5437108060397960892, 92425952044279274), - (16019757112352226923, 115532440055349092), - (788976158365366019, 72207775034593183), - (14821278253238871236, 90259718793241478), - (9303225779693813237, 112824648491551848), - (11629032224617266546, 141030810614439810), - (11879831158813179495, 88144256634024881), - (1014730893234310657, 110180320792531102), - (10491785653397664129, 137725400990663877), - (8863209042587234033, 86078375619164923), - (6467325284806654637, 107597969523956154), - (17307528642863094104, 134497461904945192), - (10817205401789433815, 84060913690590745), - (18133192770664180173, 105076142113238431), - (18054804944902837312, 131345177641548039), - (18201782118205355176, 82090736025967524), - (4305483574047142354, 102613420032459406), - (14605226504413703751, 128266775040574257), - (2210737537617482988, 80166734400358911), - (16598479977304017447, 100208418000448638), - (11524727934775246001, 125260522500560798), - (2591268940807140847, 78287826562850499), - (17074144231291089770, 97859783203563123), - (16730994270686474309, 122324729004453904), - (10456871419179046443, 76452955627783690), - (3847717237119032246, 95566194534729613), - (9421332564826178211, 119457743168412016), - (5888332853016361382, 74661089480257510), - (16583788103125227536, 93326361850321887), - (16118049110479146516, 116657952312902359), - (16991309721690548428, 72911220195563974), - (12015765115258409727, 91139025244454968), - (15019706394073012159, 113923781555568710), - (9551260955736489391, 142404726944460888), - (5969538097335305869, 89002954340288055), - (2850236603241744433, 111253692925360069), +pub static DOUBLE_POW5_SPLIT: [(u64, u64); DOUBLE_POW5_TABLE_SIZE] = [ + (0, 1152921504606846976), + (0, 1441151880758558720), + (0, 1801439850948198400), + (0, 2251799813685248000), + (0, 1407374883553280000), + (0, 1759218604441600000), + (0, 2199023255552000000), + (0, 1374389534720000000), + (0, 1717986918400000000), + (0, 2147483648000000000), + (0, 1342177280000000000), + (0, 1677721600000000000), + (0, 2097152000000000000), + (0, 1310720000000000000), + (0, 1638400000000000000), + (0, 2048000000000000000), + (0, 1280000000000000000), + (0, 1600000000000000000), + (0, 2000000000000000000), + (0, 1250000000000000000), + (0, 1562500000000000000), + (0, 1953125000000000000), + (0, 1220703125000000000), + (0, 1525878906250000000), + (0, 1907348632812500000), + (0, 1192092895507812500), + (0, 1490116119384765625), + (4611686018427387904, 1862645149230957031), + (9799832789158199296, 1164153218269348144), + (12249790986447749120, 1455191522836685180), + (15312238733059686400, 1818989403545856475), + (14528612397897220096, 2273736754432320594), + (13692068767113150464, 1421085471520200371), + (12503399940464050176, 1776356839400250464), + (15629249925580062720, 2220446049250313080), + (9768281203487539200, 1387778780781445675), + (7598665485932036096, 1734723475976807094), + (274959820560269312, 2168404344971008868), + (9395221924704944128, 1355252715606880542), + (2520655369026404352, 1694065894508600678), + (12374191248137781248, 2117582368135750847), + (14651398557727195136, 1323488980084844279), + (13702562178731606016, 1654361225106055349), + (3293144668132343808, 2067951531382569187), + (18199116482078572544, 1292469707114105741), + (8913837547316051968, 1615587133892632177), + (15753982952572452864, 2019483917365790221), + (12152082354571476992, 1262177448353618888), + 
(15190102943214346240, 1577721810442023610), + (9764256642163156992, 1972152263052529513), + (17631875447420442880, 1232595164407830945), + (8204786253993389888, 1540743955509788682), + (1032610780636961552, 1925929944387235853), + (2951224747111794922, 1203706215242022408), + (3689030933889743652, 1504632769052528010), + (13834660704216955373, 1880790961315660012), + (17870034976990372916, 1175494350822287507), + (17725857702810578241, 1469367938527859384), + (3710578054803671186, 1836709923159824231), + (26536550077201078, 2295887403949780289), + (11545800389866720434, 1434929627468612680), + (14432250487333400542, 1793662034335765850), + (8816941072311974870, 2242077542919707313), + (17039803216263454053, 1401298464324817070), + (12076381983474541759, 1751623080406021338), + (5872105442488401391, 2189528850507526673), + (15199280947623720629, 1368455531567204170), + (9775729147674874978, 1710569414459005213), + (16831347453020981627, 2138211768073756516), + (1296220121283337709, 1336382355046097823), + (15455333206886335848, 1670477943807622278), + (10095794471753144002, 2088097429759527848), + (6309871544845715001, 1305060893599704905), + (12499025449484531656, 1631326116999631131), + (11012095793428276666, 2039157646249538914), + (11494245889320060820, 1274473528905961821), + (532749306367912313, 1593091911132452277), + (5277622651387278295, 1991364888915565346), + (7910200175544436838, 1244603055572228341), + (14499436237857933952, 1555753819465285426), + (8900923260467641632, 1944692274331606783), + (12480606065433357876, 1215432671457254239), + (10989071563364309441, 1519290839321567799), + (9124653435777998898, 1899113549151959749), + (8008751406574943263, 1186945968219974843), + (5399253239791291175, 1483682460274968554), + (15972438586593889776, 1854603075343710692), + (759402079766405302, 1159126922089819183), + (14784310654990170340, 1448908652612273978), + (9257016281882937117, 1811135815765342473), + (16182956370781059300, 2263919769706678091), + (7808504722524468110, 1414949856066673807), + (5148944884728197234, 1768687320083342259), + (1824495087482858639, 2210859150104177824), + (1140309429676786649, 1381786968815111140), + (1425386787095983311, 1727233711018888925), + (6393419502297367043, 2159042138773611156), + (13219259225790630210, 1349401336733506972), + (16524074032238287762, 1686751670916883715), + (16043406521870471799, 2108439588646104644), + (803757039314269066, 1317774742903815403), + (14839754354425000045, 1647218428629769253), + (4714634887749086344, 2059023035787211567), + (9864175832484260821, 1286889397367007229), + (16941905809032713930, 1608611746708759036), + (2730638187581340797, 2010764683385948796), + (10930020904093113806, 1256727927116217997), + (18274212148543780162, 1570909908895272496), + (4396021111970173586, 1963637386119090621), + (5053356204195052443, 1227273366324431638), + (15540067292098591362, 1534091707905539547), + (14813398096695851299, 1917614634881924434), + (13870059828862294966, 1198509146801202771), + (12725888767650480803, 1498136433501503464), + (15907360959563101004, 1872670541876879330), + (14553786618154326031, 1170419088673049581), + (4357175217410743827, 1463023860841311977), + (10058155040190817688, 1828779826051639971), + (7961007781811134206, 2285974782564549964), + (14199001900486734687, 1428734239102843727), + (13137066357181030455, 1785917798878554659), + (11809646928048900164, 2232397248598193324), + (16604401366885338411, 1395248280373870827), + (16143815690179285109, 1744060350467338534), + (10956397575869330579, 
2180075438084173168), + (6847748484918331612, 1362547148802608230), + (17783057643002690323, 1703183936003260287), + (17617136035325974999, 2128979920004075359), + (17928239049719816230, 1330612450002547099), + (17798612793722382384, 1663265562503183874), + (13024893955298202172, 2079081953128979843), + (5834715712847682405, 1299426220705612402), + (16516766677914378815, 1624282775882015502), + (11422586310538197711, 2030353469852519378), + (11750802462513761473, 1268970918657824611), + (10076817059714813937, 1586213648322280764), + (12596021324643517422, 1982767060402850955), + (5566670318688504437, 1239229412751781847), + (2346651879933242642, 1549036765939727309), + (7545000868343941206, 1936295957424659136), + (4715625542714963254, 1210184973390411960), + (5894531928393704067, 1512731216738014950), + (16591536947346905892, 1890914020922518687), + (17287239619732898039, 1181821263076574179), + (16997363506238734644, 1477276578845717724), + (2799960309088866689, 1846595723557147156), + (10973347230035317489, 1154122327223216972), + (13716684037544146861, 1442652909029021215), + (12534169028502795672, 1803316136286276519), + (11056025267201106687, 2254145170357845649), + (18439230838069161439, 1408840731473653530), + (13825666510731675991, 1761050914342066913), + (3447025083132431277, 2201313642927583642), + (6766076695385157452, 1375821026829739776), + (8457595869231446815, 1719776283537174720), + (10571994836539308519, 2149720354421468400), + (6607496772837067824, 1343575221513417750), + (17482743002901110588, 1679469026891772187), + (17241742735199000331, 2099336283614715234), + (15387775227926763111, 1312085177259197021), + (5399660979626290177, 1640106471573996277), + (11361262242960250625, 2050133089467495346), + (11712474920277544544, 1281333180917184591), + (10028907631919542777, 1601666476146480739), + (7924448521472040567, 2002083095183100924), + (14176152362774801162, 1251301934489438077), + (3885132398186337741, 1564127418111797597), + (9468101516160310080, 1955159272639746996), + (15140935484454969608, 1221974545399841872), + (479425281859160394, 1527468181749802341), + (5210967620751338397, 1909335227187252926), + (17091912818251750210, 1193334516992033078), + (12141518985959911954, 1491668146240041348), + (15176898732449889943, 1864585182800051685), + (11791404716994875166, 1165365739250032303), + (10127569877816206054, 1456707174062540379), + (8047776328842869663, 1820883967578175474), + (836348374198811271, 2276104959472719343), + (7440246761515338900, 1422565599670449589), + (13911994470321561530, 1778206999588061986), + (8166621051047176104, 2222758749485077483), + (2798295147690791113, 1389224218428173427), + (17332926989895652603, 1736530273035216783), + (17054472718942177850, 2170662841294020979), + (8353202440125167204, 1356664275808763112), + (10441503050156459005, 1695830344760953890), + (3828506775840797949, 2119787930951192363), + (86973725686804766, 1324867456844495227), + (13943775212390669669, 1656084321055619033), + (3594660960206173375, 2070105401319523792), + (2246663100128858359, 1293815875824702370), + (12031700912015848757, 1617269844780877962), + (5816254103165035138, 2021587305976097453), + (5941001823691840913, 1263492066235060908), + (7426252279614801142, 1579365082793826135), + (4671129331091113523, 1974206353492282669), + (5225298841145639904, 1233878970932676668), + (6531623551432049880, 1542348713665845835), + (3552843420862674446, 1927935892082307294), + (16055585193321335241, 1204959932551442058), + (10846109454796893243, 1506199915689302573), 
+ (18169322836923504458, 1882749894611628216), + (11355826773077190286, 1176718684132267635), + (9583097447919099954, 1470898355165334544), + (11978871809898874942, 1838622943956668180), + (14973589762373593678, 2298278679945835225), + (2440964573842414192, 1436424174966147016), + (3051205717303017741, 1795530218707683770), + (13037379183483547984, 2244412773384604712), + (8148361989677217490, 1402757983365377945), + (14797138505523909766, 1753447479206722431), + (13884737113477499304, 2191809349008403039), + (15595489723564518921, 1369880843130251899), + (14882676136028260747, 1712351053912814874), + (9379973133180550126, 2140438817391018593), + (17391698254306313589, 1337774260869386620), + (3292878744173340370, 1672217826086733276), + (4116098430216675462, 2090272282608416595), + (266718509671728212, 1306420176630260372), + (333398137089660265, 1633025220787825465), + (5028433689789463235, 2041281525984781831), + (10060300083759496378, 1275800953740488644), + (12575375104699370472, 1594751192175610805), + (1884160825592049379, 1993438990219513507), + (17318501580490888525, 1245899368887195941), + (7813068920331446945, 1557374211108994927), + (5154650131986920777, 1946717763886243659), + (915813323278131534, 1216698602428902287), + (14979824709379828129, 1520873253036127858), + (9501408849870009354, 1901091566295159823), + (12855909558809837702, 1188182228934474889), + (2234828893230133415, 1485227786168093612), + (2793536116537666769, 1856534732710117015), + (8663489100477123587, 1160334207943823134), + (1605989338741628675, 1450417759929778918), + (11230858710281811652, 1813022199912223647), + (9426887369424876662, 2266277749890279559), + (12809333633531629769, 1416423593681424724), + (16011667041914537212, 1770529492101780905), + (6179525747111007803, 2213161865127226132), + (13085575628799155685, 1383226165704516332), + (16356969535998944606, 1729032707130645415), + (15834525901571292854, 2161290883913306769), + (2979049660840976177, 1350806802445816731), + (17558870131333383934, 1688508503057270913), + (8113529608884566205, 2110635628821588642), + (9682642023980241782, 1319147268013492901), + (16714988548402690132, 1648934085016866126), + (11670363648648586857, 2061167606271082658), + (11905663298832754689, 1288229753919426661), + (1047021068258779650, 1610287192399283327), + (15143834390605638274, 2012858990499104158), + (4853210475701136017, 1258036869061940099), + (1454827076199032118, 1572546086327425124), + (1818533845248790147, 1965682607909281405), + (3442426662494187794, 1228551629943300878), + (13526405364972510550, 1535689537429126097), + (3072948650933474476, 1919611921786407622), + (15755650962115585259, 1199757451116504763), + (15082877684217093670, 1499696813895630954), + (9630225068416591280, 1874621017369538693), + (8324733676974063502, 1171638135855961683), + (5794231077790191473, 1464547669819952104), + (7242788847237739342, 1830684587274940130), + (18276858095901949986, 2288355734093675162), + (16034722328366106645, 1430222333808546976), + (1596658836748081690, 1787777917260683721), + (6607509564362490017, 2234722396575854651), + (1823850468512862308, 1396701497859909157), + (6891499104068465790, 1745876872324886446), + (17837745916940358045, 2182346090406108057), + (4231062170446641922, 1363966306503817536), + (5288827713058302403, 1704957883129771920), + (6611034641322878003, 2131197353912214900), + (13355268687681574560, 1331998346195134312), + (16694085859601968200, 1664997932743917890), + (11644235287647684442, 2081247415929897363), + (4971804045566108824, 
1300779634956185852), + (6214755056957636030, 1625974543695232315), + (3156757802769657134, 2032468179619040394), + (6584659645158423613, 1270292612261900246), + (17454196593302805324, 1587865765327375307), + (17206059723201118751, 1984832206659219134), + (6142101308573311315, 1240520129162011959), + (3065940617289251240, 1550650161452514949), + (8444111790038951954, 1938312701815643686), + (665883850346957067, 1211445438634777304), + (832354812933696334, 1514306798293471630), + (10263815553021896226, 1892883497866839537), + (17944099766707154901, 1183052186166774710), + (13206752671529167818, 1478815232708468388), + (16508440839411459773, 1848519040885585485), + (12623618533845856310, 1155324400553490928), + (15779523167307320387, 1444155500691863660), + (1277659885424598868, 1805194375864829576), + (1597074856780748586, 2256492969831036970), + (5609857803915355770, 1410308106144398106), + (16235694291748970521, 1762885132680497632), + (1847873790976661535, 2203606415850622041), + (12684136165428883219, 1377254009906638775), + (11243484188358716120, 1721567512383298469), + (219297180166231438, 2151959390479123087), + (7054589765244976505, 1344974619049451929), + (13429923224983608535, 1681218273811814911), + (12175718012802122765, 2101522842264768639), + (14527352785642408584, 1313451776415480399), + (13547504963625622826, 1641814720519350499), + (12322695186104640628, 2052268400649188124), + (16925056528170176201, 1282667750405742577), + (7321262604930556539, 1603334688007178222), + (18374950293017971482, 2004168360008972777), + (4566814905495150320, 1252605225005607986), + (14931890668723713708, 1565756531257009982), + (9441491299049866327, 1957195664071262478), + (1289246043478778550, 1223247290044539049), + (6223243572775861092, 1529059112555673811), + (3167368447542438461, 1911323890694592264), + (1979605279714024038, 1194577431684120165), + (7086192618069917952, 1493221789605150206), + (18081112809442173248, 1866527237006437757), + (13606538515115052232, 1166579523129023598), + (7784801107039039482, 1458224403911279498), + (507629346944023544, 1822780504889099373), + (5246222702107417334, 2278475631111374216), + (3278889188817135834, 1424047269444608885), + (8710297504448807696, 1780059086805761106), ]; diff -Nru cargo-0.44.1/vendor/ryu/src/d2s_intrinsics.rs cargo-0.47.0/vendor/ryu/src/d2s_intrinsics.rs --- cargo-0.44.1/vendor/ryu/src/d2s_intrinsics.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/d2s_intrinsics.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,6 +18,8 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. +use core::ptr; + // Returns (lo, hi). #[cfg(not(integer128))] #[cfg_attr(feature = "no-panic", inline)] @@ -100,6 +102,70 @@ #[cfg_attr(feature = "no-panic", inline)] pub fn multiple_of_power_of_2(value: u64, p: u32) -> bool { debug_assert!(value != 0); - // return __builtin_ctzll(value) >= p; + debug_assert!(p < 64); + // __builtin_ctzll doesn't appear to be faster here. 
(value & ((1u64 << p) - 1)) == 0 } + +#[cfg(integer128)] +#[cfg_attr(feature = "no-panic", inline)] +pub fn mul_shift_64(m: u64, mul: &(u64, u64), j: u32) -> u64 { + let b0 = m as u128 * mul.0 as u128; + let b2 = m as u128 * mul.1 as u128; + (((b0 >> 64) + b2) >> (j - 64)) as u64 +} + +#[cfg(integer128)] +#[cfg_attr(feature = "no-panic", inline)] +pub unsafe fn mul_shift_all_64( + m: u64, + mul: &(u64, u64), + j: u32, + vp: *mut u64, + vm: *mut u64, + mm_shift: u32, +) -> u64 { + ptr::write(vp, mul_shift_64(4 * m + 2, mul, j)); + ptr::write(vm, mul_shift_64(4 * m - 1 - mm_shift as u64, mul, j)); + mul_shift_64(4 * m, mul, j) +} + +#[cfg(not(integer128))] +#[cfg_attr(feature = "no-panic", inline)] +pub unsafe fn mul_shift_all_64( + mut m: u64, + mul: &(u64, u64), + j: u32, + vp: *mut u64, + vm: *mut u64, + mm_shift: u32, +) -> u64 { + m <<= 1; + // m is maximum 55 bits + let (lo, tmp) = umul128(m, mul.0); + let (mut mid, mut hi) = umul128(m, mul.1); + mid = mid.wrapping_add(tmp); + hi = hi.wrapping_add((mid < tmp) as u64); // overflow into hi + + let lo2 = lo.wrapping_add(mul.0); + let mid2 = mid.wrapping_add(mul.1).wrapping_add((lo2 < lo) as u64); + let hi2 = hi.wrapping_add((mid2 < mid) as u64); + ptr::write(vp, shiftright128(mid2, hi2, j - 64 - 1)); + + if mm_shift == 1 { + let lo3 = lo.wrapping_sub(mul.0); + let mid3 = mid.wrapping_sub(mul.1).wrapping_sub((lo3 > lo) as u64); + let hi3 = hi.wrapping_sub((mid3 > mid) as u64); + ptr::write(vm, shiftright128(mid3, hi3, j - 64 - 1)); + } else { + let lo3 = lo + lo; + let mid3 = mid.wrapping_add(mid).wrapping_add((lo3 < lo) as u64); + let hi3 = hi.wrapping_add(hi).wrapping_add((mid3 < mid) as u64); + let lo4 = lo3.wrapping_sub(mul.0); + let mid4 = mid3.wrapping_sub(mul.1).wrapping_sub((lo4 > lo3) as u64); + let hi4 = hi3.wrapping_sub((mid4 > mid3) as u64); + ptr::write(vm, shiftright128(mid4, hi4, j - 64)); + } + + shiftright128(mid, hi, j - 64 - 1) +} diff -Nru cargo-0.44.1/vendor/ryu/src/d2s.rs cargo-0.47.0/vendor/ryu/src/d2s.rs --- cargo-0.44.1/vendor/ryu/src/d2s.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/d2s.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,90 +18,22 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. 
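// Illustrative aside, not part of the patch: a minimal, self-contained sketch of
// the two d2s_intrinsics helpers shown above, assuming the 128-bit (`integer128`)
// path is available. mul_shift_64(m, &(lo, hi), j) computes
// floor(m * (hi * 2^64 + lo) / 2^j) for j >= 64, and the mask test in
// multiple_of_power_of_2 is simply value.trailing_zeros() >= p spelled without
// a ctz intrinsic.
fn mul_shift_64_sketch(m: u64, mul: (u64, u64), j: u32) -> u64 {
    let b0 = m as u128 * mul.0 as u128; // m times the low 64 bits of the multiplier
    let b2 = m as u128 * mul.1 as u128; // m times the high 64 bits of the multiplier
    (((b0 >> 64) + b2) >> (j - 64)) as u64
}

fn main() {
    // 10 * 2^70 / 2^66 == 10 * 2^4 == 160, using mul = (0, 2^6), i.e. a multiplier of 2^70.
    assert_eq!(mul_shift_64_sketch(10, (0, 1 << 6), 66), 160);
    // Divisibility by 2^p via masking, as in multiple_of_power_of_2.
    let (value, p) = (96u64, 5u32);
    assert_eq!((value & ((1u64 << p) - 1)) == 0, value.trailing_zeros() >= p);
}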
-use core::ptr; - -#[cfg(maybe_uninit)] -use core::mem::MaybeUninit; - -#[cfg(not(maybe_uninit))] -use core::mem; - -use common::*; +use crate::common::*; #[cfg(not(feature = "small"))] -use d2s_full_table::*; -use d2s_intrinsics::*; +pub use crate::d2s_full_table::*; +use crate::d2s_intrinsics::*; #[cfg(feature = "small")] -use d2s_small_table::*; +pub use crate::d2s_small_table::*; +#[cfg(not(maybe_uninit))] +use core::mem; +#[cfg(maybe_uninit)] +use core::mem::MaybeUninit; pub const DOUBLE_MANTISSA_BITS: u32 = 52; pub const DOUBLE_EXPONENT_BITS: u32 = 11; - -const DOUBLE_BIAS: i32 = 1023; -const DOUBLE_POW5_INV_BITCOUNT: i32 = 122; -const DOUBLE_POW5_BITCOUNT: i32 = 121; - -#[cfg(integer128)] -#[cfg_attr(feature = "no-panic", inline)] -fn mul_shift(m: u64, mul: &(u64, u64), j: u32) -> u64 { - let b0 = m as u128 * mul.0 as u128; - let b2 = m as u128 * mul.1 as u128; - (((b0 >> 64) + b2) >> (j - 64)) as u64 -} - -#[cfg(integer128)] -#[cfg_attr(feature = "no-panic", inline)] -unsafe fn mul_shift_all( - m: u64, - mul: &(u64, u64), - j: u32, - vp: *mut u64, - vm: *mut u64, - mm_shift: u32, -) -> u64 { - ptr::write(vp, mul_shift(4 * m + 2, mul, j)); - ptr::write(vm, mul_shift(4 * m - 1 - mm_shift as u64, mul, j)); - mul_shift(4 * m, mul, j) -} - -#[cfg(not(integer128))] -#[cfg_attr(feature = "no-panic", inline)] -unsafe fn mul_shift_all( - mut m: u64, - mul: &(u64, u64), - j: u32, - vp: *mut u64, - vm: *mut u64, - mm_shift: u32, -) -> u64 { - m <<= 1; - // m is maximum 55 bits - let (lo, tmp) = umul128(m, mul.0); - let (mut mid, mut hi) = umul128(m, mul.1); - mid = mid.wrapping_add(tmp); - hi = hi.wrapping_add((mid < tmp) as u64); // overflow into hi - - let lo2 = lo.wrapping_add(mul.0); - let mid2 = mid.wrapping_add(mul.1).wrapping_add((lo2 < lo) as u64); - let hi2 = hi.wrapping_add((mid2 < mid) as u64); - ptr::write(vp, shiftright128(mid2, hi2, j - 64 - 1)); - - if mm_shift == 1 { - let lo3 = lo.wrapping_sub(mul.0); - let mid3 = mid.wrapping_sub(mul.1).wrapping_sub((lo3 > lo) as u64); - let hi3 = hi.wrapping_sub((mid3 > mid) as u64); - ptr::write(vm, shiftright128(mid3, hi3, j - 64 - 1)); - } else { - let lo3 = lo + lo; - let mid3 = mid.wrapping_add(mid).wrapping_add((lo3 < lo) as u64); - let hi3 = hi.wrapping_add(hi).wrapping_add((mid3 < mid) as u64); - let lo4 = lo3.wrapping_sub(mul.0); - let mid4 = mid3.wrapping_sub(mul.1).wrapping_sub((lo4 > lo3) as u64); - let hi4 = hi3.wrapping_sub((mid4 > mid3) as u64); - ptr::write(vm, shiftright128(mid4, hi4, j - 64)); - } - - shiftright128(mid, hi, j - 64 - 1) -} +pub const DOUBLE_BIAS: i32 = 1023; +pub const DOUBLE_POW5_INV_BITCOUNT: i32 = 125; +pub const DOUBLE_POW5_BITCOUNT: i32 = 125; #[cfg_attr(feature = "no-panic", inline)] pub fn decimal_length17(v: u64) -> u32 { @@ -205,7 +137,7 @@ let k = DOUBLE_POW5_INV_BITCOUNT + pow5bits(q as i32) - 1; let i = -e2 + q as i32 + k; vr = unsafe { - mul_shift_all( + mul_shift_all_64( m2, #[cfg(feature = "small")] &compute_inv_pow5(q), @@ -264,7 +196,7 @@ let k = pow5bits(i) - DOUBLE_POW5_BITCOUNT; let j = q as i32 - k; vr = unsafe { - mul_shift_all( + mul_shift_all_64( m2, #[cfg(feature = "small")] &compute_pow5(i as u32), diff -Nru cargo-0.44.1/vendor/ryu/src/d2s_small_table.rs cargo-0.47.0/vendor/ryu/src/d2s_small_table.rs --- cargo-0.44.1/vendor/ryu/src/d2s_small_table.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/d2s_small_table.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,9 +18,53 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, 
either express or implied. -use common::*; +use crate::common::*; #[cfg(not(integer128))] -use d2s_intrinsics::*; +use crate::d2s_intrinsics::*; + +pub static DOUBLE_POW5_INV_SPLIT2: [(u64, u64); 13] = [ + (1, 2305843009213693952), + (5955668970331000884, 1784059615882449851), + (8982663654677661702, 1380349269358112757), + (7286864317269821294, 2135987035920910082), + (7005857020398200553, 1652639921975621497), + (17965325103354776697, 1278668206209430417), + (8928596168509315048, 1978643211784836272), + (10075671573058298858, 1530901034580419511), + (597001226353042382, 1184477304306571148), + (1527430471115325346, 1832889850782397517), + (12533209867169019542, 1418129833677084982), + (5577825024675947042, 2194449627517475473), + (11006974540203867551, 1697873161311732311), +]; + +pub static POW5_INV_OFFSETS: [u32; 19] = [ + 0x54544554, 0x04055545, 0x10041000, 0x00400414, 0x40010000, 0x41155555, 0x00000454, 0x00010044, + 0x40000000, 0x44000041, 0x50454450, 0x55550054, 0x51655554, 0x40004000, 0x01000001, 0x00010500, + 0x51515411, 0x05555554, 0x00000000, +]; + +pub static DOUBLE_POW5_SPLIT2: [(u64, u64); 13] = [ + (0, 1152921504606846976), + (0, 1490116119384765625), + (1032610780636961552, 1925929944387235853), + (7910200175544436838, 1244603055572228341), + (16941905809032713930, 1608611746708759036), + (13024893955298202172, 2079081953128979843), + (6607496772837067824, 1343575221513417750), + (17332926989895652603, 1736530273035216783), + (13037379183483547984, 2244412773384604712), + (1605989338741628675, 1450417759929778918), + (9630225068416591280, 1874621017369538693), + (665883850346957067, 1211445438634777304), + (14931890668723713708, 1565756531257009982), +]; + +pub static POW5_OFFSETS: [u32; 21] = [ + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40000000, 0x59695995, 0x55545555, 0x56555515, + 0x41150504, 0x40555410, 0x44555145, 0x44504540, 0x45555550, 0x40004000, 0x96440440, 0x55565565, + 0x54454045, 0x40154151, 0x55559155, 0x51405555, 0x00000105, +]; pub static DOUBLE_POW5_TABLE: [u64; 26] = [ 1, @@ -51,52 +95,7 @@ 298023223876953125, ]; -pub static DOUBLE_POW5_SPLIT2: [(u64, u64); 13] = [ - (0, 72057594037927936), - (10376293541461622784, 93132257461547851), - (15052517733678820785, 120370621524202240), - (6258995034005762182, 77787690973264271), - (14893927168346708332, 100538234169297439), - (4272820386026678563, 129942622070561240), - (7330497575943398595, 83973451344588609), - (18377130505971182927, 108533142064701048), - (10038208235822497557, 140275798336537794), - (7017903361312433648, 90651109995611182), - (6366496589810271835, 117163813585596168), - (9264989777501460624, 75715339914673581), - (17074144231291089770, 97859783203563123), -]; - -// Unfortunately, the results are sometimes off by one. We use an additional -// lookup table to store those cases and adjust the result. 
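// Illustrative aside, not part of the patch: as I read the new small-table code,
// DOUBLE_POW5_SPLIT2 keeps only every 26th entry of the full DOUBLE_POW5_SPLIT
// (26 == DOUBLE_POW5_TABLE.len()), and compute_pow5 rebuilds entry i by scaling
// the base entry by 5^(i % 26) and then applying the 2-bit correction extracted
// as (POW5_OFFSETS[i / 16] >> ((i % 16) << 1)) & 3. The concrete table values
// below are copied from this diff; the packed offsets word is hypothetical.
fn main() {
    // Full-table entry 26 equals compressed entry 1: 26 % 26 == 0, so there is
    // nothing to scale and nothing to correct (the value is exactly 5^26).
    let full_entry_26: (u64, u64) = (0, 1490116119384765625);
    let split2_entry_1: (u64, u64) = (0, 1490116119384765625);
    assert_eq!(full_entry_26, split2_entry_1);

    // The 2-bit offset extraction used by the updated compute_pow5/compute_inv_pow5.
    let offsets: [u32; 1] = [0b1001_0110]; // hypothetical packed offsets, 2 bits per index
    let i = 2usize;
    let correction = (offsets[i / 16] >> ((i % 16) << 1)) & 3;
    assert_eq!(correction, 0b01); // bits 4 and 5 of the packed word
}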
-pub static POW5_OFFSETS: [u32; 13] = [ - 0x00000000, 0x00000000, 0x00000000, 0x033c55be, 0x03db77d8, 0x0265ffb2, 0x00000800, 0x01a8ff56, - 0x00000000, 0x0037a200, 0x00004000, 0x03fffffc, 0x00003ffe, -]; - -pub static DOUBLE_POW5_INV_SPLIT2: [(u64, u64); 13] = [ - (1, 288230376151711744), - (7661987648932456967, 223007451985306231), - (12652048002903177473, 172543658669764094), - (5522544058086115566, 266998379490113760), - (3181575136763469022, 206579990246952687), - (4551508647133041040, 159833525776178802), - (1116074521063664381, 247330401473104534), - (17400360011128145022, 191362629322552438), - (9297997190148906106, 148059663038321393), - (11720143854957885429, 229111231347799689), - (15401709288678291155, 177266229209635622), - (3003071137298187333, 274306203439684434), - (17516772882021341108, 212234145163966538), -]; - -pub static POW5_INV_OFFSETS: [u32; 20] = [ - 0x51505404, 0x55054514, 0x45555545, 0x05511411, 0x00505010, 0x00000004, 0x00000000, 0x00000000, - 0x55555040, 0x00505051, 0x00050040, 0x55554000, 0x51659559, 0x00001000, 0x15000010, 0x55455555, - 0x41404051, 0x00001010, 0x00000014, 0x00000000, -]; - -// Computes 5^i in the form required by Ryu. +// Computes 5^i in the form required by Ryū. #[cfg(integer128)] #[cfg_attr(feature = "no-panic", inline)] pub unsafe fn compute_pow5(i: u32) -> (u64, u64) { @@ -116,11 +115,11 @@ debug_assert!(base < POW5_OFFSETS.len() as u32); let shifted_sum = (b0 >> delta) + (b2 << (64 - delta)) - + ((*POW5_OFFSETS.get_unchecked(base as usize) >> offset) & 1) as u128; + + ((*POW5_OFFSETS.get_unchecked((i / 16) as usize) >> ((i % 16) << 1)) & 3) as u128; (shifted_sum as u64, (shifted_sum >> 64) as u64) } -// Computes 5^-i in the form required by Ryu. +// Computes 5^-i in the form required by Ryū. #[cfg(integer128)] #[cfg_attr(feature = "no-panic", inline)] pub unsafe fn compute_inv_pow5(i: u32) -> (u64, u64) { @@ -144,7 +143,7 @@ (shifted_sum as u64, (shifted_sum >> 64) as u64) } -// Computes 5^i in the form required by Ryu, and stores it in the given pointer. +// Computes 5^i in the form required by Ryū, and stores it in the given pointer. #[cfg(not(integer128))] #[cfg_attr(feature = "no-panic", inline)] pub unsafe fn compute_pow5(i: u32) -> (u64, u64) { @@ -169,12 +168,12 @@ debug_assert!(base < POW5_OFFSETS.len() as u32); ( shiftright128(low0, sum, delta as u32) - + ((*POW5_OFFSETS.get_unchecked(base as usize) >> offset) & 1) as u64, + + ((*POW5_OFFSETS.get_unchecked((i / 16) as usize) >> ((i % 16) << 1)) & 3) as u64, shiftright128(sum, high1, delta as u32), ) } -// Computes 5^-i in the form required by Ryu, and stores it in the given pointer. +// Computes 5^-i in the form required by Ryū, and stores it in the given pointer. #[cfg(not(integer128))] #[cfg_attr(feature = "no-panic", inline)] pub unsafe fn compute_inv_pow5(i: u32) -> (u64, u64) { diff -Nru cargo-0.44.1/vendor/ryu/src/f2s_intrinsics.rs cargo-0.47.0/vendor/ryu/src/f2s_intrinsics.rs --- cargo-0.44.1/vendor/ryu/src/f2s_intrinsics.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/f2s_intrinsics.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,113 @@ +// Translated from C to Rust. The original C code can be found at +// https://github.com/ulfjack/ryu and carries the following license: +// +// Copyright 2018 Ulf Adams +// +// The contents of this file may be used under the terms of the Apache License, +// Version 2.0. 
+// +// (See accompanying file LICENSE-Apache or copy at +// http://www.apache.org/licenses/LICENSE-2.0) +// +// Alternatively, the contents of this file may be used under the terms of +// the Boost Software License, Version 1.0. +// (See accompanying file LICENSE-Boost or copy at +// https://www.boost.org/LICENSE_1_0.txt) +// +// Unless required by applicable law or agreed to in writing, this software +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. + +use crate::d2s; + +pub const FLOAT_POW5_INV_BITCOUNT: i32 = d2s::DOUBLE_POW5_INV_BITCOUNT - 64; +pub const FLOAT_POW5_BITCOUNT: i32 = d2s::DOUBLE_POW5_BITCOUNT - 64; + +#[cfg_attr(feature = "no-panic", inline)] +fn pow5factor_32(mut value: u32) -> u32 { + let mut count = 0u32; + loop { + debug_assert!(value != 0); + let q = value / 5; + let r = value % 5; + if r != 0 { + break; + } + value = q; + count += 1; + } + count +} + +// Returns true if value is divisible by 5^p. +#[cfg_attr(feature = "no-panic", inline)] +pub fn multiple_of_power_of_5_32(value: u32, p: u32) -> bool { + pow5factor_32(value) >= p +} + +// Returns true if value is divisible by 2^p. +#[cfg_attr(feature = "no-panic", inline)] +pub fn multiple_of_power_of_2_32(value: u32, p: u32) -> bool { + // __builtin_ctz doesn't appear to be faster here. + (value & ((1u32 << p) - 1)) == 0 +} + +// It seems to be slightly faster to avoid uint128_t here, although the +// generated code for uint128_t looks slightly nicer. +#[cfg_attr(feature = "no-panic", inline)] +fn mul_shift_32(m: u32, factor: u64, shift: i32) -> u32 { + debug_assert!(shift > 32); + + // The casts here help MSVC to avoid calls to the __allmul library + // function. + let factor_lo = factor as u32; + let factor_hi = (factor >> 32) as u32; + let bits0 = m as u64 * factor_lo as u64; + let bits1 = m as u64 * factor_hi as u64; + + let sum = (bits0 >> 32) + bits1; + let shifted_sum = sum >> (shift - 32); + debug_assert!(shifted_sum <= u32::max_value() as u64); + shifted_sum as u32 +} + +#[cfg_attr(feature = "no-panic", inline)] +pub fn mul_pow5_inv_div_pow2(m: u32, q: u32, j: i32) -> u32 { + #[cfg(feature = "small")] + { + // The inverse multipliers are defined as [2^x / 5^y] + 1; the upper 64 + // bits from the double lookup table are the correct bits for [2^x / + // 5^y], so we have to add 1 here. Note that we rely on the fact that + // the added 1 that's already stored in the table never overflows into + // the upper 64 bits. 
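// Illustrative aside, not part of the patch: the "[2^k / 5^q] + 1" reciprocal
// trick described in the comment above, shown with a deliberately small k so it
// is easy to check by hand (the real code takes the upper 64 bits of the 128-bit
// double-precision entries and adds 1). k = 37 and q = 2 are arbitrary choices.
fn main() {
    let (q, k) = (2u32, 37u32);
    let inv = (1u64 << k) / 5u64.pow(q) + 1; // [2^k / 5^q] + 1
    for m in 1u64..100_000 {
        // Multiplying by the reciprocal and shifting reproduces m / 5^q exactly
        // for every m in this small range.
        assert_eq!((m * inv) >> k, m / 5u64.pow(q));
    }
}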
+ let pow5 = unsafe { d2s::compute_inv_pow5(q) }; + mul_shift_32(m, pow5.1 + 1, j) + } + + #[cfg(not(feature = "small"))] + { + debug_assert!(q < d2s::DOUBLE_POW5_INV_SPLIT.len() as u32); + unsafe { + mul_shift_32( + m, + d2s::DOUBLE_POW5_INV_SPLIT.get_unchecked(q as usize).1 + 1, + j, + ) + } + } +} + +#[cfg_attr(feature = "no-panic", inline)] +pub fn mul_pow5_div_pow2(m: u32, i: u32, j: i32) -> u32 { + #[cfg(feature = "small")] + { + let pow5 = unsafe { d2s::compute_pow5(i) }; + mul_shift_32(m, pow5.1, j) + } + + #[cfg(not(feature = "small"))] + { + debug_assert!(i < d2s::DOUBLE_POW5_SPLIT.len() as u32); + unsafe { mul_shift_32(m, d2s::DOUBLE_POW5_SPLIT.get_unchecked(i as usize).1, j) } + } +} diff -Nru cargo-0.44.1/vendor/ryu/src/f2s.rs cargo-0.47.0/vendor/ryu/src/f2s.rs --- cargo-0.44.1/vendor/ryu/src/f2s.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/f2s.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,160 +18,13 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. -use common::*; +use crate::common::*; +use crate::f2s_intrinsics::*; pub const FLOAT_MANTISSA_BITS: u32 = 23; pub const FLOAT_EXPONENT_BITS: u32 = 8; - const FLOAT_BIAS: i32 = 127; -const FLOAT_POW5_INV_BITCOUNT: i32 = 59; -const FLOAT_POW5_BITCOUNT: i32 = 61; - -// This table is generated by PrintFloatLookupTable. -static FLOAT_POW5_INV_SPLIT: [u64; 32] = [ - 576460752303423489, - 461168601842738791, - 368934881474191033, - 295147905179352826, - 472236648286964522, - 377789318629571618, - 302231454903657294, - 483570327845851670, - 386856262276681336, - 309485009821345069, - 495176015714152110, - 396140812571321688, - 316912650057057351, - 507060240091291761, - 405648192073033409, - 324518553658426727, - 519229685853482763, - 415383748682786211, - 332306998946228969, - 531691198313966350, - 425352958651173080, - 340282366920938464, - 544451787073501542, - 435561429658801234, - 348449143727040987, - 557518629963265579, - 446014903970612463, - 356811923176489971, - 570899077082383953, - 456719261665907162, - 365375409332725730, - 1 << 63, -]; - -static FLOAT_POW5_SPLIT: [u64; 47] = [ - 1152921504606846976, - 1441151880758558720, - 1801439850948198400, - 2251799813685248000, - 1407374883553280000, - 1759218604441600000, - 2199023255552000000, - 1374389534720000000, - 1717986918400000000, - 2147483648000000000, - 1342177280000000000, - 1677721600000000000, - 2097152000000000000, - 1310720000000000000, - 1638400000000000000, - 2048000000000000000, - 1280000000000000000, - 1600000000000000000, - 2000000000000000000, - 1250000000000000000, - 1562500000000000000, - 1953125000000000000, - 1220703125000000000, - 1525878906250000000, - 1907348632812500000, - 1192092895507812500, - 1490116119384765625, - 1862645149230957031, - 1164153218269348144, - 1455191522836685180, - 1818989403545856475, - 2273736754432320594, - 1421085471520200371, - 1776356839400250464, - 2220446049250313080, - 1387778780781445675, - 1734723475976807094, - 2168404344971008868, - 1355252715606880542, - 1694065894508600678, - 2117582368135750847, - 1323488980084844279, - 1654361225106055349, - 2067951531382569187, - 1292469707114105741, - 1615587133892632177, - 2019483917365790221, -]; - -#[cfg_attr(feature = "no-panic", inline)] -fn pow5_factor(mut value: u32) -> u32 { - let mut count = 0u32; - loop { - debug_assert!(value != 0); - let q = value / 5; - let r = value % 5; - if r != 0 { - break; - } - value = q; - count += 1; - } - count -} - -// Returns true if value is 
divisible by 5^p. -#[cfg_attr(feature = "no-panic", inline)] -fn multiple_of_power_of_5(value: u32, p: u32) -> bool { - pow5_factor(value) >= p -} - -// Returns true if value is divisible by 2^p. -#[cfg_attr(feature = "no-panic", inline)] -fn multiple_of_power_of_2(value: u32, p: u32) -> bool { - // return __builtin_ctz(value) >= p; - (value & ((1u32 << p) - 1)) == 0 -} - -// It seems to be slightly faster to avoid uint128_t here, although the -// generated code for uint128_t looks slightly nicer. -#[cfg_attr(feature = "no-panic", inline)] -fn mul_shift(m: u32, factor: u64, shift: i32) -> u32 { - debug_assert!(shift > 32); - - // The casts here help MSVC to avoid calls to the __allmul library - // function. - let factor_lo = factor as u32; - let factor_hi = (factor >> 32) as u32; - let bits0 = m as u64 * factor_lo as u64; - let bits1 = m as u64 * factor_hi as u64; - - let sum = (bits0 >> 32) + bits1; - let shifted_sum = sum >> (shift - 32); - debug_assert!(shifted_sum <= u32::max_value() as u64); - shifted_sum as u32 -} - -#[cfg_attr(feature = "no-panic", inline)] -fn mul_pow5_inv_div_pow2(m: u32, q: u32, j: i32) -> u32 { - debug_assert!(q < FLOAT_POW5_INV_SPLIT.len() as u32); - unsafe { mul_shift(m, *FLOAT_POW5_INV_SPLIT.get_unchecked(q as usize), j) } -} - -#[cfg_attr(feature = "no-panic", inline)] -fn mul_pow5_div_pow2(m: u32, i: u32, j: i32) -> u32 { - debug_assert!(i < FLOAT_POW5_SPLIT.len() as u32); - unsafe { mul_shift(m, *FLOAT_POW5_SPLIT.get_unchecked(i as usize), j) } -} +pub use crate::f2s_intrinsics::{FLOAT_POW5_BITCOUNT, FLOAT_POW5_INV_BITCOUNT}; // A floating decimal representing m * 10^e. pub struct FloatingDecimal32 { @@ -233,11 +86,11 @@ // The largest power of 5 that fits in 24 bits is 5^10, but q <= 9 seems to be safe as well. // Only one of mp, mv, and mm can be a multiple of 5, if any. if mv % 5 == 0 { - vr_is_trailing_zeros = multiple_of_power_of_5(mv, q); + vr_is_trailing_zeros = multiple_of_power_of_5_32(mv, q); } else if accept_bounds { - vm_is_trailing_zeros = multiple_of_power_of_5(mm, q); + vm_is_trailing_zeros = multiple_of_power_of_5_32(mm, q); } else { - vp -= multiple_of_power_of_5(mp, q) as u32; + vp -= multiple_of_power_of_5_32(mp, q) as u32; } } } else { @@ -266,7 +119,7 @@ } } else if q < 31 { // TODO(ulfjack): Use a tighter bound here. - vr_is_trailing_zeros = multiple_of_power_of_2(mv, q - 1); + vr_is_trailing_zeros = multiple_of_power_of_2_32(mv, q - 1); } } diff -Nru cargo-0.44.1/vendor/ryu/src/lib.rs cargo-0.47.0/vendor/ryu/src/lib.rs --- cargo-0.44.1/vendor/ryu/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,11 @@ +//! [![github]](https://github.com/dtolnay/ryu) [![crates-io]](https://crates.io/crates/ryu) [![docs-rs]](https://docs.rs/ryu) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! 
[docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K +//! +//!
+//! //! Pure Rust implementation of Ryū, an algorithm to quickly convert floating //! point numbers to decimal strings. //! @@ -13,7 +21,7 @@ //! //! # Example //! -//! ```edition2018 +//! ``` //! fn main() { //! let mut buffer = ryu::Buffer::new(); //! let printed = buffer.format(1.234); @@ -54,7 +62,7 @@ //! $ cargo bench //! ``` //! -//! The benchmark shows Ryu approximately 4-10x faster than the standard library +//! The benchmark shows Ryū approximately 4-10x faster than the standard library //! across a range of f32 and f64 inputs. Measurements are in nanoseconds per //! iteration; smaller is better. //! @@ -81,16 +89,13 @@ //! notation. #![no_std] -#![doc(html_root_url = "https://docs.rs/ryu/1.0.4")] +#![doc(html_root_url = "https://docs.rs/ryu/1.0.5")] #![cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))] #![cfg_attr( feature = "cargo-clippy", allow(cast_lossless, many_single_char_names, unreadable_literal,) )] -#[cfg(feature = "no-panic")] -extern crate no_panic; - mod buffer; mod common; mod d2s; @@ -101,11 +106,12 @@ mod d2s_small_table; mod digit_table; mod f2s; +mod f2s_intrinsics; mod pretty; -pub use buffer::{Buffer, Float}; +pub use crate::buffer::{Buffer, Float}; /// Unsafe functions that mirror the API of the C implementation of Ryū. pub mod raw { - pub use pretty::{format32, format64}; + pub use crate::pretty::{format32, format64}; } diff -Nru cargo-0.44.1/vendor/ryu/src/parse.rs cargo-0.47.0/vendor/ryu/src/parse.rs --- cargo-0.44.1/vendor/ryu/src/parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/parse.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,19 @@ +use core::fmt::{self, Display}; + +#[derive(Copy, Clone, Debug)] +pub enum Error { + InputTooShort, + InputTooLong, + MalformedInput, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let msg = match self { + Error::InputTooShort => "input too short", + Error::InputTooLong => "input too long", + Error::MalformedInput => "malformed input", + }; + formatter.write_str(msg) + } +} diff -Nru cargo-0.44.1/vendor/ryu/src/pretty/exponent.rs cargo-0.47.0/vendor/ryu/src/pretty/exponent.rs --- cargo-0.44.1/vendor/ryu/src/pretty/exponent.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/pretty/exponent.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,6 @@ +use crate::digit_table::*; use core::ptr; -use digit_table::*; - #[cfg_attr(feature = "no-panic", inline)] pub unsafe fn write_exponent3(mut k: isize, mut result: *mut u8) -> usize { let sign = k < 0; diff -Nru cargo-0.44.1/vendor/ryu/src/pretty/mantissa.rs cargo-0.47.0/vendor/ryu/src/pretty/mantissa.rs --- cargo-0.44.1/vendor/ryu/src/pretty/mantissa.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/pretty/mantissa.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,6 @@ +use crate::digit_table::*; use core::ptr; -use digit_table::*; - #[cfg_attr(feature = "no-panic", inline)] pub unsafe fn write_mantissa_long(mut output: u64, mut result: *mut u8) { if (output >> 32) != 0 { diff -Nru cargo-0.44.1/vendor/ryu/src/pretty/mod.rs cargo-0.47.0/vendor/ryu/src/pretty/mod.rs --- cargo-0.44.1/vendor/ryu/src/pretty/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/pretty/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,15 +1,12 @@ mod exponent; mod mantissa; -use core::{mem, ptr}; - use self::exponent::*; use self::mantissa::*; -use common; -use d2s; -use d2s::*; -use f2s::*; - +use crate::common; +use crate::d2s::{self, *}; +use 
crate::f2s::*; +use core::{mem, ptr}; #[cfg(feature = "no-panic")] use no_panic::no_panic; @@ -37,7 +34,7 @@ /// /// ## Example /// -/// ```edition2018 +/// ``` /// use std::{mem::MaybeUninit, slice, str}; /// /// let f = 1.234f64; @@ -50,7 +47,7 @@ /// assert_eq!(print, "1.234"); /// } /// ``` -#[cfg_attr(must_use_return, must_use)] +#[must_use] #[cfg_attr(feature = "no-panic", no_panic)] pub unsafe fn format64(f: f64, result: *mut u8) -> usize { let bits = mem::transmute::(f); @@ -144,7 +141,7 @@ /// /// ## Example /// -/// ```edition2018 +/// ``` /// use std::{mem::MaybeUninit, slice, str}; /// /// let f = 1.234f32; @@ -157,7 +154,7 @@ /// assert_eq!(print, "1.234"); /// } /// ``` -#[cfg_attr(must_use_return, must_use)] +#[must_use] #[cfg_attr(feature = "no-panic", no_panic)] pub unsafe fn format32(f: f32, result: *mut u8) -> usize { let bits = mem::transmute::(f); diff -Nru cargo-0.44.1/vendor/ryu/src/s2d.rs cargo-0.47.0/vendor/ryu/src/s2d.rs --- cargo-0.44.1/vendor/ryu/src/s2d.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/s2d.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,216 @@ +use crate::common::*; +use crate::d2s; +use crate::d2s_intrinsics::*; +use crate::parse::Error; +#[cfg(feature = "no-panic")] +use no_panic::no_panic; + +const DOUBLE_EXPONENT_BIAS: usize = 1023; + +fn floor_log2(value: u64) -> u32 { + 63_u32.wrapping_sub(value.leading_zeros()) +} + +#[cfg_attr(feature = "no-panic", no_panic)] +pub fn s2d(buffer: &[u8]) -> Result { + let len = buffer.len(); + if len == 0 { + return Err(Error::InputTooShort); + } + + let mut m10digits = 0; + let mut e10digits = 0; + let mut dot_index = len; + let mut e_index = len; + let mut m10 = 0u64; + let mut e10 = 0i32; + let mut signed_m = false; + let mut signed_e = false; + + let mut i = 0; + if unsafe { *buffer.get_unchecked(0) } == b'-' { + signed_m = true; + i += 1; + } + + while let Some(c) = buffer.get(i).copied() { + if c == b'.' { + if dot_index != len { + return Err(Error::MalformedInput); + } + dot_index = i; + i += 1; + continue; + } + if c < b'0' || c > b'9' { + break; + } + if m10digits >= 17 { + return Err(Error::InputTooLong); + } + m10 = 10 * m10 + (c - b'0') as u64; + if m10 != 0 { + m10digits += 1; + } + i += 1; + } + + if let Some(b'e') | Some(b'E') = buffer.get(i) { + e_index = i; + i += 1; + match buffer.get(i) { + Some(b'-') => { + signed_e = true; + i += 1; + } + Some(b'+') => i += 1, + _ => {} + } + while let Some(c) = buffer.get(i).copied() { + if c < b'0' || c > b'9' { + return Err(Error::MalformedInput); + } + if e10digits > 3 { + // TODO: Be more lenient. Return +/-Infinity or +/-0 instead. + return Err(Error::InputTooLong); + } + e10 = 10 * e10 + (c - b'0') as i32; + if e10 != 0 { + e10digits += 1; + } + i += 1; + } + } + + if i < len { + return Err(Error::MalformedInput); + } + if signed_e { + e10 = -e10; + } + e10 -= if dot_index < e_index { + (e_index - dot_index - 1) as i32 + } else { + 0 + }; + if m10 == 0 { + return Ok(if signed_m { -0.0 } else { 0.0 }); + } + + if m10digits + e10 <= -324 || m10 == 0 { + // Number is less than 1e-324, which should be rounded down to 0; return + // +/-0.0. + let ieee = (signed_m as u64) << (d2s::DOUBLE_EXPONENT_BITS + d2s::DOUBLE_MANTISSA_BITS); + return Ok(f64::from_bits(ieee)); + } + if m10digits + e10 >= 310 { + // Number is larger than 1e+309, which should be rounded to +/-Infinity. 
+ let ieee = ((signed_m as u64) << (d2s::DOUBLE_EXPONENT_BITS + d2s::DOUBLE_MANTISSA_BITS)) + | (0x7ff_u64 << d2s::DOUBLE_MANTISSA_BITS); + return Ok(f64::from_bits(ieee)); + } + + // Convert to binary float m2 * 2^e2, while retaining information about + // whether the conversion was exact (trailing_zeros). + let e2: i32; + let m2: u64; + let mut trailing_zeros: bool; + if e10 >= 0 { + // The length of m * 10^e in bits is: + // log2(m10 * 10^e10) = log2(m10) + e10 log2(10) = log2(m10) + e10 + e10 * log2(5) + // + // We want to compute the DOUBLE_MANTISSA_BITS + 1 top-most bits (+1 for + // the implicit leading one in IEEE format). We therefore choose a + // binary output exponent of + // log2(m10 * 10^e10) - (DOUBLE_MANTISSA_BITS + 1). + // + // We use floor(log2(5^e10)) so that we get at least this many bits; + // better to have an additional bit than to not have enough bits. + e2 = floor_log2(m10) + .wrapping_add(e10 as u32) + .wrapping_add(log2_pow5(e10) as u32) + .wrapping_sub(d2s::DOUBLE_MANTISSA_BITS + 1) as i32; + + // We now compute [m10 * 10^e10 / 2^e2] = [m10 * 5^e10 / 2^(e2-e10)]. + // To that end, we use the DOUBLE_POW5_SPLIT table. + let j = e2 + .wrapping_sub(e10) + .wrapping_sub(ceil_log2_pow5(e10)) + .wrapping_add(d2s::DOUBLE_POW5_BITCOUNT); + debug_assert!(j >= 0); + debug_assert!(e10 < d2s::DOUBLE_POW5_SPLIT.len() as i32); + m2 = mul_shift_64( + m10, + unsafe { d2s::DOUBLE_POW5_SPLIT.get_unchecked(e10 as usize) }, + j as u32, + ); + + // We also compute if the result is exact, i.e., + // [m10 * 10^e10 / 2^e2] == m10 * 10^e10 / 2^e2. + // This can only be the case if 2^e2 divides m10 * 10^e10, which in turn + // requires that the largest power of 2 that divides m10 + e10 is + // greater than e2. If e2 is less than e10, then the result must be + // exact. Otherwise we use the existing multiple_of_power_of_2 function. + trailing_zeros = + e2 < e10 || e2 - e10 < 64 && multiple_of_power_of_2(m10, (e2 - e10) as u32); + } else { + e2 = floor_log2(m10) + .wrapping_add(e10 as u32) + .wrapping_sub(ceil_log2_pow5(-e10) as u32) + .wrapping_sub(d2s::DOUBLE_MANTISSA_BITS + 1) as i32; + let j = e2 + .wrapping_sub(e10) + .wrapping_add(ceil_log2_pow5(-e10)) + .wrapping_sub(1) + .wrapping_add(d2s::DOUBLE_POW5_INV_BITCOUNT); + debug_assert!(-e10 < d2s::DOUBLE_POW5_INV_SPLIT.len() as i32); + m2 = mul_shift_64( + m10, + unsafe { d2s::DOUBLE_POW5_INV_SPLIT.get_unchecked(-e10 as usize) }, + j as u32, + ); + trailing_zeros = multiple_of_power_of_5(m10, -e10 as u32); + } + + // Compute the final IEEE exponent. + let mut ieee_e2 = i32::max(0, e2 + DOUBLE_EXPONENT_BIAS as i32 + floor_log2(m2) as i32) as u32; + + if ieee_e2 > 0x7fe { + // Final IEEE exponent is larger than the maximum representable; return +/-Infinity. + let ieee = ((signed_m as u64) << (d2s::DOUBLE_EXPONENT_BITS + d2s::DOUBLE_MANTISSA_BITS)) + | (0x7ff_u64 << d2s::DOUBLE_MANTISSA_BITS); + return Ok(f64::from_bits(ieee)); + } + + // We need to figure out how much we need to shift m2. The tricky part is + // that we need to take the final IEEE exponent into account, so we need to + // reverse the bias and also special-case the value 0. + let shift = if ieee_e2 == 0 { 1 } else { ieee_e2 as i32 } + .wrapping_sub(e2) + .wrapping_sub(DOUBLE_EXPONENT_BIAS as i32) + .wrapping_sub(d2s::DOUBLE_MANTISSA_BITS as i32); + debug_assert!(shift >= 0); + + // We need to round up if the exact value is more than 0.5 above the value + // we computed. 
That's equivalent to checking if the last removed bit was 1 + // and either the value was not just trailing zeros or the result would + // otherwise be odd. + // + // We need to update trailing_zeros given that we have the exact output + // exponent ieee_e2 now. + trailing_zeros &= (m2 & ((1_u64 << (shift - 1)) - 1)) == 0; + let last_removed_bit = (m2 >> (shift - 1)) & 1; + let round_up = last_removed_bit != 0 && (!trailing_zeros || ((m2 >> shift) & 1) != 0); + + let mut ieee_m2 = (m2 >> shift).wrapping_add(round_up as u64); + if ieee_m2 == (1_u64 << (d2s::DOUBLE_MANTISSA_BITS + 1)) { + // Due to how the IEEE represents +/-Infinity, we don't need to check + // for overflow here. + ieee_e2 += 1; + } + ieee_m2 &= (1_u64 << d2s::DOUBLE_MANTISSA_BITS) - 1; + let ieee = ((((signed_m as u64) << d2s::DOUBLE_EXPONENT_BITS) | ieee_e2 as u64) + << d2s::DOUBLE_MANTISSA_BITS) + | ieee_m2; + Ok(f64::from_bits(ieee)) +} diff -Nru cargo-0.44.1/vendor/ryu/src/s2f.rs cargo-0.47.0/vendor/ryu/src/s2f.rs --- cargo-0.44.1/vendor/ryu/src/s2f.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/src/s2f.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,207 @@ +use crate::common::*; +use crate::f2s; +use crate::f2s_intrinsics::*; +use crate::parse::Error; +#[cfg(feature = "no-panic")] +use no_panic::no_panic; + +const FLOAT_EXPONENT_BIAS: usize = 127; + +fn floor_log2(value: u32) -> u32 { + 31_u32.wrapping_sub(value.leading_zeros()) +} + +#[cfg_attr(feature = "no-panic", no_panic)] +pub fn s2f(buffer: &[u8]) -> Result { + let len = buffer.len(); + if len == 0 { + return Err(Error::InputTooShort); + } + + let mut m10digits = 0; + let mut e10digits = 0; + let mut dot_index = len; + let mut e_index = len; + let mut m10 = 0u32; + let mut e10 = 0i32; + let mut signed_m = false; + let mut signed_e = false; + + let mut i = 0; + if unsafe { *buffer.get_unchecked(0) } == b'-' { + signed_m = true; + i += 1; + } + + while let Some(c) = buffer.get(i).copied() { + if c == b'.' { + if dot_index != len { + return Err(Error::MalformedInput); + } + dot_index = i; + i += 1; + continue; + } + if c < b'0' || c > b'9' { + break; + } + if m10digits >= 9 { + return Err(Error::InputTooLong); + } + m10 = 10 * m10 + (c - b'0') as u32; + if m10 != 0 { + m10digits += 1; + } + i += 1; + } + + if let Some(b'e') | Some(b'E') = buffer.get(i) { + e_index = i; + i += 1; + match buffer.get(i) { + Some(b'-') => { + signed_e = true; + i += 1; + } + Some(b'+') => i += 1, + _ => {} + } + while let Some(c) = buffer.get(i).copied() { + if c < b'0' || c > b'9' { + return Err(Error::MalformedInput); + } + if e10digits > 3 { + // TODO: Be more lenient. Return +/-Infinity or +/-0 instead. + return Err(Error::InputTooLong); + } + e10 = 10 * e10 + (c - b'0') as i32; + if e10 != 0 { + e10digits += 1; + } + i += 1; + } + } + + if i < len { + return Err(Error::MalformedInput); + } + if signed_e { + e10 = -e10; + } + e10 -= if dot_index < e_index { + (e_index - dot_index - 1) as i32 + } else { + 0 + }; + if m10 == 0 { + return Ok(if signed_m { -0.0 } else { 0.0 }); + } + + if m10digits + e10 <= -46 || m10 == 0 { + // Number is less than 1e-46, which should be rounded down to 0; return + // +/-0.0. + let ieee = (signed_m as u32) << (f2s::FLOAT_EXPONENT_BITS + f2s::FLOAT_MANTISSA_BITS); + return Ok(f32::from_bits(ieee)); + } + if m10digits + e10 >= 40 { + // Number is larger than 1e+39, which should be rounded to +/-Infinity. 
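+ // f32::MAX is roughly 3.4028235e38, so any input of at least 1e+39
+ // necessarily overflows; the all-ones exponent (0xff) with a zero mantissa
+ // built below is the IEEE 754 binary32 encoding of +/-Infinity.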
+ let ieee = ((signed_m as u32) << (f2s::FLOAT_EXPONENT_BITS + f2s::FLOAT_MANTISSA_BITS)) + | (0xff_u32 << f2s::FLOAT_MANTISSA_BITS); + return Ok(f32::from_bits(ieee)); + } + + // Convert to binary float m2 * 2^e2, while retaining information about + // whether the conversion was exact (trailing_zeros). + let e2: i32; + let m2: u32; + let mut trailing_zeros: bool; + if e10 >= 0 { + // The length of m * 10^e in bits is: + // log2(m10 * 10^e10) = log2(m10) + e10 log2(10) = log2(m10) + e10 + e10 * log2(5) + // + // We want to compute the FLOAT_MANTISSA_BITS + 1 top-most bits (+1 for + // the implicit leading one in IEEE format). We therefore choose a + // binary output exponent of + // log2(m10 * 10^e10) - (FLOAT_MANTISSA_BITS + 1). + // + // We use floor(log2(5^e10)) so that we get at least this many bits; better to + // have an additional bit than to not have enough bits. + e2 = floor_log2(m10) + .wrapping_add(e10 as u32) + .wrapping_add(log2_pow5(e10) as u32) + .wrapping_sub(f2s::FLOAT_MANTISSA_BITS + 1) as i32; + + // We now compute [m10 * 10^e10 / 2^e2] = [m10 * 5^e10 / 2^(e2-e10)]. + // To that end, we use the FLOAT_POW5_SPLIT table. + let j = e2 + .wrapping_sub(e10) + .wrapping_sub(ceil_log2_pow5(e10)) + .wrapping_add(f2s::FLOAT_POW5_BITCOUNT); + debug_assert!(j >= 0); + m2 = mul_pow5_div_pow2(m10, e10 as u32, j); + + // We also compute if the result is exact, i.e., + // [m10 * 10^e10 / 2^e2] == m10 * 10^e10 / 2^e2. + // This can only be the case if 2^e2 divides m10 * 10^e10, which in turn + // requires that the largest power of 2 that divides m10 + e10 is + // greater than e2. If e2 is less than e10, then the result must be + // exact. Otherwise we use the existing multiple_of_power_of_2 function. + trailing_zeros = + e2 < e10 || e2 - e10 < 32 && multiple_of_power_of_2_32(m10, (e2 - e10) as u32); + } else { + e2 = floor_log2(m10) + .wrapping_add(e10 as u32) + .wrapping_sub(ceil_log2_pow5(-e10) as u32) + .wrapping_sub(f2s::FLOAT_MANTISSA_BITS + 1) as i32; + let j = e2 + .wrapping_sub(e10) + .wrapping_add(ceil_log2_pow5(-e10)) + .wrapping_sub(1) + .wrapping_add(f2s::FLOAT_POW5_INV_BITCOUNT); + m2 = mul_pow5_inv_div_pow2(m10, -e10 as u32, j); + trailing_zeros = multiple_of_power_of_5_32(m10, -e10 as u32); + } + + // Compute the final IEEE exponent. + let mut ieee_e2 = i32::max(0, e2 + FLOAT_EXPONENT_BIAS as i32 + floor_log2(m2) as i32) as u32; + + if ieee_e2 > 0xfe { + // Final IEEE exponent is larger than the maximum representable; return + // +/-Infinity. + let ieee = ((signed_m as u32) << (f2s::FLOAT_EXPONENT_BITS + f2s::FLOAT_MANTISSA_BITS)) + | (0xff_u32 << f2s::FLOAT_MANTISSA_BITS); + return Ok(f32::from_bits(ieee)); + } + + // We need to figure out how much we need to shift m2. The tricky part is + // that we need to take the final IEEE exponent into account, so we need to + // reverse the bias and also special-case the value 0. + let shift = if ieee_e2 == 0 { 1 } else { ieee_e2 as i32 } + .wrapping_sub(e2) + .wrapping_sub(FLOAT_EXPONENT_BIAS as i32) + .wrapping_sub(f2s::FLOAT_MANTISSA_BITS as i32); + debug_assert!(shift >= 0); + + // We need to round up if the exact value is more than 0.5 above the value + // we computed. That's equivalent to checking if the last removed bit was 1 + // and either the value was not just trailing zeros or the result would + // otherwise be odd. + // + // We need to update trailing_zeros given that we have the exact output + // exponent ieee_e2 now. 
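+ // For example, with shift == 3 the three low bits of m2 are discarded: a
+ // tail of 0b101 has the last removed bit set plus nonzero bits below it,
+ // so it always rounds up, while an exact tail of 0b100 is a tie and rounds
+ // up only when the retained mantissa (m2 >> shift) is odd (half to even).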
+ trailing_zeros &= (m2 & ((1_u32 << (shift - 1)) - 1)) == 0; + let last_removed_bit = (m2 >> (shift - 1)) & 1; + let round_up = last_removed_bit != 0 && (!trailing_zeros || ((m2 >> shift) & 1) != 0); + + let mut ieee_m2 = (m2 >> shift).wrapping_add(round_up as u32); + if ieee_m2 == (1_u32 << (f2s::FLOAT_MANTISSA_BITS + 1)) { + // Due to how the IEEE represents +/-Infinity, we don't need to check + // for overflow here. + ieee_e2 += 1; + } + ieee_m2 &= (1_u32 << f2s::FLOAT_MANTISSA_BITS) - 1; + let ieee = ((((signed_m as u32) << f2s::FLOAT_EXPONENT_BITS) | ieee_e2 as u32) + << f2s::FLOAT_MANTISSA_BITS) + | ieee_m2; + Ok(f32::from_bits(ieee)) +} diff -Nru cargo-0.44.1/vendor/ryu/tests/common_test.rs cargo-0.47.0/vendor/ryu/tests/common_test.rs --- cargo-0.44.1/vendor/ryu/tests/common_test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/common_test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,83 @@ +// Translated from C to Rust. The original C code can be found at +// https://github.com/ulfjack/ryu and carries the following license: +// +// Copyright 2018 Ulf Adams +// +// The contents of this file may be used under the terms of the Apache License, +// Version 2.0. +// +// (See accompanying file LICENSE-Apache or copy at +// http://www.apache.org/licenses/LICENSE-2.0) +// +// Alternatively, the contents of this file may be used under the terms of +// the Boost Software License, Version 1.0. +// (See accompanying file LICENSE-Boost or copy at +// https://www.boost.org/LICENSE_1_0.txt) +// +// Unless required by applicable law or agreed to in writing, this software +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. + +#![allow(dead_code)] + +#[path = "../src/common.rs"] +mod common; + +use common::*; + +#[test] +fn test_decimal_length9() { + assert_eq!(1, decimal_length9(0)); + assert_eq!(1, decimal_length9(1)); + assert_eq!(1, decimal_length9(9)); + assert_eq!(2, decimal_length9(10)); + assert_eq!(2, decimal_length9(99)); + assert_eq!(3, decimal_length9(100)); + assert_eq!(3, decimal_length9(999)); + assert_eq!(9, decimal_length9(999999999)); +} + +#[test] +fn test_ceil_log2_pow5() { + assert_eq!(1, ceil_log2_pow5(0)); + assert_eq!(3, ceil_log2_pow5(1)); + assert_eq!(5, ceil_log2_pow5(2)); + assert_eq!(7, ceil_log2_pow5(3)); + assert_eq!(10, ceil_log2_pow5(4)); + assert_eq!(8192, ceil_log2_pow5(3528)); +} + +#[test] +fn test_log10_pow2() { + assert_eq!(0, log10_pow2(0)); + assert_eq!(0, log10_pow2(1)); + assert_eq!(0, log10_pow2(2)); + assert_eq!(0, log10_pow2(3)); + assert_eq!(1, log10_pow2(4)); + assert_eq!(496, log10_pow2(1650)); +} + +#[test] +fn test_log10_pow5() { + assert_eq!(0, log10_pow5(0)); + assert_eq!(0, log10_pow5(1)); + assert_eq!(1, log10_pow5(2)); + assert_eq!(2, log10_pow5(3)); + assert_eq!(2, log10_pow5(4)); + assert_eq!(1831, log10_pow5(2620)); +} + +#[test] +fn test_float_to_bits() { + assert_eq!(0, 0.0_f32.to_bits()); + assert_eq!(0x40490fda, 3.1415926_f32.to_bits()); +} + +#[test] +fn test_double_to_bits() { + assert_eq!(0, 0.0_f64.to_bits()); + assert_eq!( + 0x400921FB54442D18, + 3.1415926535897932384626433_f64.to_bits(), + ); +} diff -Nru cargo-0.44.1/vendor/ryu/tests/d2s_table_test.rs cargo-0.47.0/vendor/ryu/tests/d2s_table_test.rs --- cargo-0.44.1/vendor/ryu/tests/d2s_table_test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/d2s_table_test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -20,8 +20,6 @@ #![allow(dead_code)] -extern crate core; - #[path = 
"../src/common.rs"] mod common; @@ -46,7 +44,7 @@ #[test] fn test_compute_inv_pow5() { - for (i, entry) in DOUBLE_POW5_INV_SPLIT.iter().enumerate() { + for (i, entry) in DOUBLE_POW5_INV_SPLIT[..292].iter().enumerate() { assert_eq!(*entry, unsafe { compute_inv_pow5(i as u32) }, "entry {}", i); } } diff -Nru cargo-0.44.1/vendor/ryu/tests/d2s_test.rs cargo-0.47.0/vendor/ryu/tests/d2s_test.rs --- cargo-0.44.1/vendor/ryu/tests/d2s_test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/d2s_test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,9 +18,6 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. -extern crate rand; -extern crate ryu; - #[macro_use] mod macros; @@ -59,8 +56,8 @@ } } -#[cfg(not(miri))] #[test] +#[cfg_attr(miri, ignore)] fn test_non_finite() { for i in 0u64..1 << 23 { let f = f64::from_bits((((1 << 11) - 1) << 52) + (i << 29)); @@ -114,7 +111,7 @@ fn test_looks_like_pow5() { // These numbers have a mantissa that is a multiple of the largest power of // 5 that fits, and an exponent that causes the computation for q to result - // in 22, which is a corner case for Ryu. + // in 22, which is a corner case for Ryū. assert_eq!(f64::from_bits(0x4830F0CF064DD592), 5.764607523034235e39); check!(5.764607523034235e39); assert_eq!(f64::from_bits(0x4840F0CF064DD592), 1.152921504606847e40); diff -Nru cargo-0.44.1/vendor/ryu/tests/exhaustive.rs cargo-0.47.0/vendor/ryu/tests/exhaustive.rs --- cargo-0.44.1/vendor/ryu/tests/exhaustive.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/exhaustive.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,5 @@ #![cfg(exhaustive)] -extern crate num_cpus; -extern crate ryu; - use std::str; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; diff -Nru cargo-0.44.1/vendor/ryu/tests/f2s_test.rs cargo-0.47.0/vendor/ryu/tests/f2s_test.rs --- cargo-0.44.1/vendor/ryu/tests/f2s_test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/f2s_test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -18,9 +18,6 @@ // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. -extern crate rand; -extern crate ryu; - #[macro_use] mod macros; @@ -54,8 +51,8 @@ } } -#[cfg(not(miri))] #[test] +#[cfg_attr(miri, ignore)] fn test_non_finite() { for i in 0u32..1 << 23 { let f = f32::from_bits((((1 << 8) - 1) << 23) + i); @@ -152,7 +149,7 @@ fn test_looks_like_pow5() { // These numbers have a mantissa that is the largest power of 5 that fits, // and an exponent that causes the computation for q to result in 10, which - // is a corner case for Ryu. + // is a corner case for Ryū. assert_eq!(f32::from_bits(0x5D1502F9), 6.7108864e17); check!(6.7108864e17); assert_eq!(f32::from_bits(0x5D9502F9), 1.3421773e18); diff -Nru cargo-0.44.1/vendor/ryu/tests/s2d_test.rs cargo-0.47.0/vendor/ryu/tests/s2d_test.rs --- cargo-0.44.1/vendor/ryu/tests/s2d_test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/s2d_test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,130 @@ +// Translated from C to Rust. The original C code can be found at +// https://github.com/ulfjack/ryu and carries the following license: +// +// Copyright 2018 Ulf Adams +// +// The contents of this file may be used under the terms of the Apache License, +// Version 2.0. 
+// +// (See accompanying file LICENSE-Apache or copy at +// http://www.apache.org/licenses/LICENSE-2.0) +// +// Alternatively, the contents of this file may be used under the terms of +// the Boost Software License, Version 1.0. +// (See accompanying file LICENSE-Boost or copy at +// https://www.boost.org/LICENSE_1_0.txt) +// +// Unless required by applicable law or agreed to in writing, this software +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. + +#![cfg(not(feature = "small"))] +#![allow(dead_code)] + +#[path = "../src/common.rs"] +mod common; + +#[path = "../src/d2s_full_table.rs"] +mod d2s_full_table; + +#[path = "../src/d2s_intrinsics.rs"] +mod d2s_intrinsics; + +#[path = "../src/d2s.rs"] +mod d2s; + +#[path = "../src/s2d.rs"] +mod s2d; + +#[path = "../src/parse.rs"] +mod parse; + +use crate::parse::Error; +use crate::s2d::s2d; + +impl PartialEq for Error { + fn eq(&self, other: &Self) -> bool { + *self as u8 == *other as u8 + } +} + +#[test] +fn test_bad_input() { + assert_eq!(Error::MalformedInput, s2d(b"x").unwrap_err()); + assert_eq!(Error::MalformedInput, s2d(b"1..1").unwrap_err()); + assert_eq!(Error::MalformedInput, s2d(b"..").unwrap_err()); + assert_eq!(Error::MalformedInput, s2d(b"1..1").unwrap_err()); + assert_eq!(Error::MalformedInput, s2d(b"1ee1").unwrap_err()); + assert_eq!(Error::MalformedInput, s2d(b"1e.1").unwrap_err()); + assert_eq!(Error::InputTooShort, s2d(b"").unwrap_err()); + assert_eq!(Error::InputTooLong, s2d(b"123456789012345678").unwrap_err()); + assert_eq!(Error::InputTooLong, s2d(b"1e12345").unwrap_err()); +} + +#[test] +fn test_basic() { + assert_eq!(0.0, s2d(b"0").unwrap()); + assert_eq!(-0.0, s2d(b"-0").unwrap()); + assert_eq!(1.0, s2d(b"1").unwrap()); + assert_eq!(2.0, s2d(b"2").unwrap()); + assert_eq!(123456789.0, s2d(b"123456789").unwrap()); + assert_eq!(123.456, s2d(b"123.456").unwrap()); + assert_eq!(123.456, s2d(b"123456e-3").unwrap()); + assert_eq!(123.456, s2d(b"1234.56e-1").unwrap()); + assert_eq!(1.453, s2d(b"1.453").unwrap()); + assert_eq!(1453.0, s2d(b"1.453e+3").unwrap()); + assert_eq!(0.0, s2d(b".0").unwrap()); + assert_eq!(1.0, s2d(b"1e0").unwrap()); + assert_eq!(1.0, s2d(b"1E0").unwrap()); + assert_eq!(1.0, s2d(b"000001.000000").unwrap()); +} + +#[test] +fn test_min_max() { + assert_eq!( + 1.7976931348623157e308, + s2d(b"1.7976931348623157e308").unwrap(), + ); + assert_eq!(5E-324, s2d(b"5E-324").unwrap()); +} + +#[test] +fn test_mantissa_rounding_overflow() { + // This results in binary mantissa that is all ones and requires rounding up + // because it is closer to 1 than to the next smaller float. This is a + // regression test that the mantissa overflow is handled correctly by + // increasing the exponent. + assert_eq!(1.0, s2d(b"0.99999999999999999").unwrap()); + // This number overflows the mantissa *and* the IEEE exponent. + assert_eq!(f64::INFINITY, s2d(b"1.7976931348623159e308").unwrap()); +} + +#[test] +fn test_underflow() { + assert_eq!(0.0, s2d(b"2.4e-324").unwrap()); + assert_eq!(0.0, s2d(b"1e-324").unwrap()); + assert_eq!(0.0, s2d(b"9.99999e-325").unwrap()); + // These are just about halfway between 0 and the smallest float. + // The first is just below the halfway point, the second just above. 
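+ // The smallest positive f64 is the subnormal 5e-324 (2^-1074, about
+ // 4.9406564584124654e-324), so its midpoint with zero lies near 2.47e-324.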
+ assert_eq!(0.0, s2d(b"2.4703282292062327e-324").unwrap()); + assert_eq!(5e-324, s2d(b"2.4703282292062328e-324").unwrap()); +} + +#[test] +fn test_overflow() { + assert_eq!(f64::INFINITY, s2d(b"2e308").unwrap()); + assert_eq!(f64::INFINITY, s2d(b"1e309").unwrap()); +} + +#[test] +fn test_table_size_denormal() { + assert_eq!(5e-324, s2d(b"4.9406564584124654e-324").unwrap()); +} + +#[test] +fn test_issue157() { + assert_eq!( + 1.2999999999999999E+154, + s2d(b"1.2999999999999999E+154").unwrap(), + ); +} diff -Nru cargo-0.44.1/vendor/ryu/tests/s2f_test.rs cargo-0.47.0/vendor/ryu/tests/s2f_test.rs --- cargo-0.44.1/vendor/ryu/tests/s2f_test.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/ryu/tests/s2f_test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,77 @@ +// Translated from C to Rust. The original C code can be found at +// https://github.com/ulfjack/ryu and carries the following license: +// +// Copyright 2018 Ulf Adams +// +// The contents of this file may be used under the terms of the Apache License, +// Version 2.0. +// +// (See accompanying file LICENSE-Apache or copy at +// http://www.apache.org/licenses/LICENSE-2.0) +// +// Alternatively, the contents of this file may be used under the terms of +// the Boost Software License, Version 1.0. +// (See accompanying file LICENSE-Boost or copy at +// https://www.boost.org/LICENSE_1_0.txt) +// +// Unless required by applicable law or agreed to in writing, this software +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. + +#![allow(dead_code)] + +#[path = "../src/common.rs"] +mod common; + +#[path = "../src/d2s_full_table.rs"] +mod d2s_full_table; + +#[path = "../src/d2s_intrinsics.rs"] +mod d2s_intrinsics; + +#[path = "../src/d2s.rs"] +mod d2s; + +#[path = "../src/f2s_intrinsics.rs"] +mod f2s_intrinsics; + +#[path = "../src/f2s.rs"] +mod f2s; + +#[path = "../src/s2f.rs"] +mod s2f; + +#[path = "../src/parse.rs"] +mod parse; + +use crate::parse::Error; +use crate::s2f::s2f; + +impl PartialEq for Error { + fn eq(&self, other: &Self) -> bool { + *self as u8 == *other as u8 + } +} + +#[test] +fn test_basic() { + assert_eq!(0.0, s2f(b"0").unwrap()); + assert_eq!(-0.0, s2f(b"-0").unwrap()); + assert_eq!(1.0, s2f(b"1").unwrap()); + assert_eq!(-1.0, s2f(b"-1").unwrap()); + assert_eq!(123456792.0, s2f(b"123456789").unwrap()); + assert_eq!(299792448.0, s2f(b"299792458").unwrap()); +} + +#[test] +fn test_min_max() { + assert_eq!(1e-45, s2f(b"1e-45").unwrap()); + assert_eq!(f32::MIN_POSITIVE, s2f(b"1.1754944e-38").unwrap()); + assert_eq!(f32::MAX, s2f(b"3.4028235e+38").unwrap()); +} + +#[test] +fn test_mantissa_rounding_overflow() { + assert_eq!(1.0, s2f(b"0.999999999").unwrap()); + assert_eq!(f32::INFINITY, s2f(b"3.4028236e+38").unwrap()); +} diff -Nru cargo-0.44.1/vendor/semver/.cargo-checksum.json cargo-0.47.0/vendor/semver/.cargo-checksum.json --- cargo-0.44.1/vendor/semver/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"} \ No newline at end of file +{"files":{},"package":"394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/semver/Cargo.toml cargo-0.47.0/vendor/semver/Cargo.toml --- cargo-0.44.1/vendor/semver/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/Cargo.toml 
2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,34 +12,32 @@ [package] name = "semver" -version = "0.9.0" +version = "0.10.0" authors = ["Steve Klabnik ", "The Rust Project Developers"] description = "Semantic version parsing and comparison.\n" homepage = "https://docs.rs/crate/semver/" documentation = "https://docs.rs/crate/semver/" readme = "README.md" +keywords = ["version", "semantic", "compare"] +categories = ["development-tools", "parser-implementations"] license = "MIT/Apache-2.0" repository = "https://github.com/steveklabnik/semver" +[dependencies.diesel] +version = "1.1" +optional = true + [dependencies.semver-parser] version = "0.7.0" [dependencies.serde] version = "1.0" optional = true -[dev-dependencies.crates-index] -version = "0.5.0" - -[dev-dependencies.serde_json] -version = "1.0" - [dev-dependencies.serde_derive] version = "1.0" -[dev-dependencies.tempdir] -version = "0.3.4" +[dev-dependencies.serde_json] +version = "1.0" [features] +ci = ["serde", "diesel/sqlite"] default = [] -ci = ["serde"] -[badges.travis-ci] -repository = "steveklabnik/semver" diff -Nru cargo-0.44.1/vendor/semver/LICENSE-APACHE cargo-0.47.0/vendor/semver/LICENSE-APACHE --- cargo-0.44.1/vendor/semver/LICENSE-APACHE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/LICENSE-APACHE 2020-10-01 21:38:28.000000000 +0000 @@ -1,201 +1,201 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff -Nru cargo-0.44.1/vendor/semver/LICENSE-MIT cargo-0.47.0/vendor/semver/LICENSE-MIT --- cargo-0.44.1/vendor/semver/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/LICENSE-MIT 2020-10-01 21:38:28.000000000 +0000 @@ -1,25 +1,25 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/semver/README.md cargo-0.47.0/vendor/semver/README.md --- cargo-0.44.1/vendor/semver/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,103 +1,103 @@ -semver -====== - -Semantic version parsing and comparison. - -[![Build Status](https://api.travis-ci.org/steveklabnik/semver.svg?branch=master)](https://travis-ci.org/steveklabnik/semver) - -[Documentation](https://steveklabnik.github.io/semver) - -Semantic versioning (see http://semver.org/) is a set of rules for -assigning version numbers. - -## SemVer and the Rust ecosystem - -Rust itself follows the SemVer specification, as does its standard libraries. The two are -not tied together. - -[Cargo](https://crates.io), Rust's package manager, uses SemVer to determine which versions of -packages you need installed. 
- -## Installation - -To use `semver`, add this to your `[dependencies]` section: - -```toml -semver = "0.7.0" -``` - -And this to your crate root: - -```rust -extern crate semver; -``` - -## Versions - -At its simplest, the `semver` crate allows you to construct `Version` objects using the `parse` -method: - -```rust -use semver::Version; - -assert!(Version::parse("1.2.3") == Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec!(), - build: vec!(), -})); -``` - -If you have multiple `Version`s, you can use the usual comparison operators to compare them: - -```rust -use semver::Version; - -assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); -assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); -``` - -## Requirements - -The `semver` crate also provides the ability to compare requirements, which are more complex -comparisons. - -For example, creating a requirement that only matches versions greater than or -equal to 1.0.0: - -```rust -use semver::Version; -use semver::VersionReq; - -let r = VersionReq::parse(">= 1.0.0").unwrap(); -let v = Version::parse("1.0.0").unwrap(); - -assert!(r.to_string() == ">= 1.0.0".to_string()); -assert!(r.matches(&v)) -``` - -It also allows parsing of `~x.y.z` and `^x.y.z` requirements as defined at -https://www.npmjs.org/doc/misc/semver.html - -**Tilde requirements** specify a minimal version with some updates: - -```notrust -~1.2.3 := >=1.2.3 <1.3.0 -~1.2 := >=1.2.0 <1.3.0 -~1 := >=1.0.0 <2.0.0 -``` - -**Caret requirements** allow SemVer compatible updates to a specified version, -`0.x` and `0.x+1` are not considered compatible, but `1.x` and `1.x+1` are. - -`0.0.x` is not considered compatible with any other version. -Missing minor and patch versions are desugared to `0` but allow flexibility for that value. - -```notrust -^1.2.3 := >=1.2.3 <2.0.0 -^0.2.3 := >=0.2.3 <0.3.0 -^0.0.3 := >=0.0.3 <0.0.4 -^0.0 := >=0.0.0 <0.1.0 -^0 := >=0.0.0 <1.0.0 -``` +semver +====== + +Semantic version parsing and comparison. + +![Build Status](https://github.com/steveklabnik/semver/workflows/CI/badge.svg) + +[Documentation](https://steveklabnik.github.io/semver) + +Semantic versioning (see https://semver.org/) is a set of rules for +assigning version numbers. + +## SemVer and the Rust ecosystem + +Rust itself follows the SemVer specification, as does its standard libraries. The two are +not tied together. + +[Cargo](https://crates.io), Rust's package manager, uses SemVer to determine which versions of +packages you need installed. + +## Installation + +To use `semver`, add this to your `[dependencies]` section: + +```toml +semver = "0.9.0" +``` + +And this to your crate root: + +```rust +extern crate semver; +``` + +## Versions + +At its simplest, the `semver` crate allows you to construct `Version` objects using the `parse` +method: + +```rust +use semver::Version; + +assert!(Version::parse("1.2.3") == Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec!(), + build: vec!(), +})); +``` + +If you have multiple `Version`s, you can use the usual comparison operators to compare them: + +```rust +use semver::Version; + +assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); +assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); +``` + +## Requirements + +The `semver` crate also provides the ability to compare requirements, which are more complex +comparisons. 
+ +For example, creating a requirement that only matches versions greater than or +equal to 1.0.0: + +```rust +use semver::Version; +use semver::VersionReq; + +let r = VersionReq::parse(">= 1.0.0").unwrap(); +let v = Version::parse("1.0.0").unwrap(); + +assert!(r.to_string() == ">= 1.0.0".to_string()); +assert!(r.matches(&v)) +``` + +It also allows parsing of `~x.y.z` and `^x.y.z` requirements as defined at +https://www.npmjs.com/package/semver + +**Tilde requirements** specify a minimal version with some updates: + +```notrust +~1.2.3 := >=1.2.3 <1.3.0 +~1.2 := >=1.2.0 <1.3.0 +~1 := >=1.0.0 <2.0.0 +``` + +**Caret requirements** allow SemVer compatible updates to a specified version, +`0.x` and `0.x+1` are not considered compatible, but `1.x` and `1.x+1` are. + +`0.0.x` is not considered compatible with any other version. +Missing minor and patch versions are desugared to `0` but allow flexibility for that value. + +```notrust +^1.2.3 := >=1.2.3 <2.0.0 +^0.2.3 := >=0.2.3 <0.3.0 +^0.0.3 := >=0.0.3 <0.0.4 +^0.0 := >=0.0.0 <0.1.0 +^0 := >=0.0.0 <1.0.0 +``` diff -Nru cargo-0.44.1/vendor/semver/src/diesel_impls.rs cargo-0.47.0/vendor/semver/src/diesel_impls.rs --- cargo-0.44.1/vendor/semver/src/diesel_impls.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/semver/src/diesel_impls.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,45 @@ +use diesel::backend::Backend; +use diesel::deserialize::{self, FromSql}; +use diesel::serialize::{self, IsNull, Output, ToSql}; +use diesel::sql_types::Text; +use std::io::Write; + +use {Version, VersionReq}; + +impl FromSql for Version +where + DB: Backend, + *const str: FromSql, +{ + fn from_sql(input: Option<&DB::RawValue>) -> deserialize::Result { + let str_ptr = <*const str as FromSql>::from_sql(input)?; + let s = unsafe { &*str_ptr }; + s.parse().map_err(Into::into) + } +} + +impl ToSql for Version { + fn to_sql(&self, out: &mut Output) -> serialize::Result { + write!(out, "{}", self)?; + Ok(IsNull::No) + } +} + +impl FromSql for VersionReq +where + DB: Backend, + *const str: FromSql, +{ + fn from_sql(input: Option<&DB::RawValue>) -> deserialize::Result { + let str_ptr = <*const str as FromSql>::from_sql(input)?; + let s = unsafe { &*str_ptr }; + s.parse().map_err(Into::into) + } +} + +impl ToSql for VersionReq { + fn to_sql(&self, out: &mut Output) -> serialize::Result { + write!(out, "{}", self)?; + Ok(IsNull::No) + } +} diff -Nru cargo-0.44.1/vendor/semver/src/lib.rs cargo-0.47.0/vendor/semver/src/lib.rs --- cargo-0.44.1/vendor/semver/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,182 +1,222 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Semantic version parsing and comparison. -//! -//! Semantic versioning (see http://semver.org/) is a set of rules for -//! assigning version numbers. -//! -//! ## SemVer overview -//! -//! Given a version number MAJOR.MINOR.PATCH, increment the: -//! -//! 1. MAJOR version when you make incompatible API changes, -//! 2. MINOR version when you add functionality in a backwards-compatible -//! manner, and -//! 3. PATCH version when you make backwards-compatible bug fixes. -//! -//! 
Additional labels for pre-release and build metadata are available as -//! extensions to the MAJOR.MINOR.PATCH format. -//! -//! Any references to 'the spec' in this documentation refer to [version 2.0 of -//! the SemVer spec](http://semver.org/spec/v2.0.0.html). -//! -//! ## SemVer and the Rust ecosystem -//! -//! Rust itself follows the SemVer specification, as does its standard -//! libraries. The two are not tied together. -//! -//! [Cargo](http://crates.io), Rust's package manager, uses SemVer to determine -//! which versions of packages you need installed. -//! -//! ## Versions -//! -//! At its simplest, the `semver` crate allows you to construct `Version` -//! objects using the `parse` method: -//! -//! ```{rust} -//! use semver::Version; -//! -//! assert!(Version::parse("1.2.3") == Ok(Version { -//! major: 1, -//! minor: 2, -//! patch: 3, -//! pre: vec!(), -//! build: vec!(), -//! })); -//! ``` -//! -//! If you have multiple `Version`s, you can use the usual comparison operators -//! to compare them: -//! -//! ```{rust} -//! use semver::Version; -//! -//! assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); -//! assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); -//! ``` -//! -//! If you explicitly need to modify a Version, SemVer also allows you to -//! increment the major, minor, and patch numbers in accordance with the spec. -//! -//! Please note that in order to do this, you must use a mutable Version: -//! -//! ```{rust} -//! use semver::Version; -//! -//! let mut bugfix_release = Version::parse("1.0.0").unwrap(); -//! bugfix_release.increment_patch(); -//! -//! assert_eq!(Ok(bugfix_release), Version::parse("1.0.1")); -//! ``` -//! -//! When incrementing the minor version number, the patch number resets to zero -//! (in accordance with section 7 of the spec) -//! -//! ```{rust} -//! use semver::Version; -//! -//! let mut feature_release = Version::parse("1.4.6").unwrap(); -//! feature_release.increment_minor(); -//! -//! assert_eq!(Ok(feature_release), Version::parse("1.5.0")); -//! ``` -//! -//! Similarly, when incrementing the major version number, the patch and minor -//! numbers reset to zero (in accordance with section 8 of the spec) -//! -//! ```{rust} -//! use semver::Version; -//! -//! let mut chrome_release = Version::parse("41.5.5377").unwrap(); -//! chrome_release.increment_major(); -//! -//! assert_eq!(Ok(chrome_release), Version::parse("42.0.0")); -//! ``` -//! -//! ## Requirements -//! -//! The `semver` crate also provides the ability to compare requirements, which -//! are more complex comparisons. -//! -//! For example, creating a requirement that only matches versions greater than -//! or equal to 1.0.0: -//! -//! ```{rust} -//! # #![allow(unstable)] -//! use semver::Version; -//! use semver::VersionReq; -//! -//! let r = VersionReq::parse(">= 1.0.0").unwrap(); -//! let v = Version::parse("1.0.0").unwrap(); -//! -//! assert!(r.to_string() == ">= 1.0.0".to_string()); -//! assert!(r.matches(&v)) -//! ``` -//! -//! It also allows parsing of `~x.y.z` and `^x.y.z` requirements as defined at -//! https://www.npmjs.org/doc/misc/semver.html -//! -//! **Tilde requirements** specify a minimal version with some updates: -//! -//! ```notrust -//! ~1.2.3 := >=1.2.3 <1.3.0 -//! ~1.2 := >=1.2.0 <1.3.0 -//! ~1 := >=1.0.0 <2.0.0 -//! ``` -//! -//! **Caret requirements** allow SemVer compatible updates to a specified -//! verion, `0.x` and `0.x+1` are not considered compatible, but `1.x` and -//! `1.x+1` are. -//! -//! 
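The tilde and caret desugarings listed here can be checked directly against `VersionReq`; a short sketch of the behaviour this crate's own tests exercise:

```rust
extern crate semver;

use semver::{Version, VersionReq};

fn main() {
    // ~1.2.3 := >=1.2.3 <1.3.0
    let tilde = VersionReq::parse("~1.2.3").unwrap();
    assert!(tilde.matches(&Version::parse("1.2.9").unwrap()));
    assert!(!tilde.matches(&Version::parse("1.3.0").unwrap()));

    // ^0.2.3 := >=0.2.3 <0.3.0 (a leading zero narrows what counts as compatible)
    let caret = VersionReq::parse("^0.2.3").unwrap();
    assert!(caret.matches(&Version::parse("0.2.9").unwrap()));
    assert!(!caret.matches(&Version::parse("0.3.0").unwrap()));
}
```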
`0.0.x` is not considered compatible with any other version. -//! Missing minor and patch versions are desugared to `0` but allow flexibility -//! for that value. -//! -//! ```notrust -//! ^1.2.3 := >=1.2.3 <2.0.0 -//! ^0.2.3 := >=0.2.3 <0.3.0 -//! ^0.0.3 := >=0.0.3 <0.0.4 -//! ^0.0 := >=0.0.0 <0.1.0 -//! ^0 := >=0.0.0 <1.0.0 -//! ``` -//! -//! **Wildcard requirements** allows parsing of version requirements of the -//! formats `*`, `x.*` and `x.y.*`. -//! -//! ```notrust -//! * := >=0.0.0 -//! 1.* := >=1.0.0 <2.0.0 -//! 1.2.* := >=1.2.0 <1.3.0 -//! ``` - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/favicon.ico")] -#![deny(missing_docs)] -#![cfg_attr(test, deny(warnings))] - -extern crate semver_parser; - -// Serialization and deserialization support for version numbers -#[cfg(feature = "serde")] -extern crate serde; - -// We take the common approach of keeping our own module system private, and -// just re-exporting the interface that we want. - -pub use version::{Version, Identifier, SemVerError}; -pub use version::Identifier::{Numeric, AlphaNumeric}; -pub use version_req::{VersionReq, ReqParseError}; - -// SemVer-compliant versions. -mod version; - -// advanced version comparisons -mod version_req; +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Semantic version parsing and comparison. +//! +//! Semantic versioning (see http://semver.org/) is a set of rules for +//! assigning version numbers. +//! +//! ## SemVer overview +//! +//! Given a version number MAJOR.MINOR.PATCH, increment the: +//! +//! 1. MAJOR version when you make incompatible API changes, +//! 2. MINOR version when you add functionality in a backwards-compatible +//! manner, and +//! 3. PATCH version when you make backwards-compatible bug fixes. +//! +//! Additional labels for pre-release and build metadata are available as +//! extensions to the MAJOR.MINOR.PATCH format. +//! +//! Any references to 'the spec' in this documentation refer to [version 2.0 of +//! the SemVer spec](http://semver.org/spec/v2.0.0.html). +//! +//! ## SemVer and the Rust ecosystem +//! +//! Rust itself follows the SemVer specification, as does its standard +//! libraries. The two are not tied together. +//! +//! [Cargo](http://crates.io), Rust's package manager, uses SemVer to determine +//! which versions of packages you need installed. +//! +//! ## Versions +//! +//! [`Version`]: struct.Version.html +//! +//! At its simplest, the `semver` crate allows you to construct [`Version`] +//! objects using the [`parse`] method: +//! +//! [`parse`]: struct.Version.html#method.parse +//! +//! ```{rust} +//! use semver::Version; +//! +//! assert!(Version::parse("1.2.3") == Ok(Version { +//! major: 1, +//! minor: 2, +//! patch: 3, +//! pre: vec!(), +//! build: vec!(), +//! })); +//! ``` +//! +//! If you have multiple [`Version`]s, you can use the usual comparison operators +//! to compare them: +//! +//! ```{rust} +//! use semver::Version; +//! +//! assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); +//! assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); +//! ``` +//! +//! 
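One detail worth making explicit about those comparisons: a pre-release precedes its release, and dot-separated pre-release identifiers are compared piecewise, numerically where both sides are numbers. A quick sketch:

```rust
extern crate semver;

use semver::Version;

fn main() {
    // A pre-release sorts below the corresponding release...
    assert!(Version::parse("1.2.3-alpha").unwrap() < Version::parse("1.2.3").unwrap());

    // ...and numeric pre-release identifiers compare as numbers, not strings.
    assert!(Version::parse("1.2.3-alpha.2").unwrap() < Version::parse("1.2.3-alpha.11").unwrap());
}
```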
If you explicitly need to modify a [`Version`], SemVer also allows you to +//! increment the major, minor, and patch numbers in accordance with the spec. +//! +//! Please note that in order to do this, you must use a mutable Version: +//! +//! ```{rust} +//! use semver::Version; +//! +//! # fn try_increment_patch() -> Result<(), Box<::std::error::Error>> { +//! let mut bugfix_release = Version::parse("1.0.0")?; +//! bugfix_release.increment_patch(); +//! +//! assert_eq!(Ok(bugfix_release), Version::parse("1.0.1")); +//! # Ok(()) +//! # } +//! # fn main() { +//! # try_increment_patch().unwrap(); +//! # } +//! ``` +//! +//! When incrementing the minor version number, the patch number resets to zero +//! (in accordance with section 7 of the spec) +//! +//! ```{rust} +//! use semver::Version; +//! +//! # fn try_increment_minor() -> Result<(), Box<::std::error::Error>> { +//! let mut feature_release = Version::parse("1.4.6")?; +//! feature_release.increment_minor(); +//! +//! assert_eq!(Ok(feature_release), Version::parse("1.5.0")); +//! # Ok(()) +//! # } +//! # fn main() { +//! # try_increment_minor().unwrap(); +//! # } +//! ``` +//! +//! Similarly, when incrementing the major version number, the patch and minor +//! numbers reset to zero (in accordance with section 8 of the spec) +//! +//! ```{rust} +//! use semver::Version; +//! +//! # fn try_increment_major() -> Result<(), Box<::std::error::Error>> { +//! let mut chrome_release = Version::parse("41.5.5377")?; +//! chrome_release.increment_major(); +//! +//! assert_eq!(Ok(chrome_release), Version::parse("42.0.0")); +//! # Ok(()) +//! # } +//! # fn main() { +//! # try_increment_major().unwrap(); +//! # } +//! ``` +//! +//! ## Requirements +//! +//! The `semver` crate also provides the ability to compare requirements, which +//! are more complex comparisons. +//! +//! For example, creating a requirement that only matches versions greater than +//! or equal to 1.0.0: +//! +//! ```{rust} +//! # #![allow(unstable)] +//! use semver::Version; +//! use semver::VersionReq; +//! +//! # fn try_compare() -> Result<(), Box<::std::error::Error>> { +//! let r = VersionReq::parse(">= 1.0.0")?; +//! let v = Version::parse("1.0.0")?; +//! +//! assert!(r.to_string() == ">=1.0.0".to_string()); +//! assert!(r.matches(&v)); +//! # Ok(()) +//! # } +//! # fn main() { +//! # try_compare().unwrap(); +//! # } +//! ``` +//! +//! It also allows parsing of `~x.y.z` and `^x.y.z` requirements as defined at +//! https://www.npmjs.com/package/semver +//! +//! **Tilde requirements** specify a minimal version with some updates: +//! +//! ```notrust +//! ~1.2.3 := >=1.2.3 <1.3.0 +//! ~1.2 := >=1.2.0 <1.3.0 +//! ~1 := >=1.0.0 <2.0.0 +//! ``` +//! +//! **Caret requirements** allow SemVer compatible updates to a specified +//! verion, `0.x` and `0.x+1` are not considered compatible, but `1.x` and +//! `1.x+1` are. +//! +//! `0.0.x` is not considered compatible with any other version. +//! Missing minor and patch versions are desugared to `0` but allow flexibility +//! for that value. +//! +//! ```notrust +//! ^1.2.3 := >=1.2.3 <2.0.0 +//! ^0.2.3 := >=0.2.3 <0.3.0 +//! ^0.0.3 := >=0.0.3 <0.0.4 +//! ^0.0 := >=0.0.0 <0.1.0 +//! ^0 := >=0.0.0 <1.0.0 +//! ``` +//! +//! **Wildcard requirements** allows parsing of version requirements of the +//! formats `*`, `x.*` and `x.y.*`. +//! +//! ```notrust +//! * := >=0.0.0 +//! 1.* := >=1.0.0 <2.0.0 +//! 1.2.* := >=1.2.0 <1.3.0 +//! 
``` + +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://docs.rs/semver" +)] +#![deny(missing_docs)] +#![cfg_attr(test, deny(warnings))] + +extern crate semver_parser; + +// Serialization and deserialization support for version numbers +#[cfg(feature = "serde")] +extern crate serde; + +// Database support for version numbers +#[cfg(feature = "diesel")] +#[macro_use] +extern crate diesel; + +// We take the common approach of keeping our own module system private, and +// just re-exporting the interface that we want. + +pub use version::Identifier::{AlphaNumeric, Numeric}; +pub use version::{Identifier, SemVerError, Version}; +pub use version_req::{ReqParseError, VersionReq}; + +// SemVer-compliant versions. +mod version; + +// advanced version comparisons +mod version_req; + +#[cfg(feature = "diesel")] +// Diesel support +mod diesel_impls; diff -Nru cargo-0.44.1/vendor/semver/src/version_req.rs cargo-0.47.0/vendor/semver/src/version_req.rs --- cargo-0.44.1/vendor/semver/src/version_req.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/src/version_req.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,895 +1,1031 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::error::Error; -use std::fmt; -use std::result; -use std::str; - -use Version; -use version::Identifier; -use semver_parser; - -#[cfg(feature = "serde")] -use serde::ser::{Serialize, Serializer}; -#[cfg(feature = "serde")] -use serde::de::{self, Deserialize, Deserializer, Visitor}; - -use self::Op::{Ex, Gt, GtEq, Lt, LtEq, Tilde, Compatible, Wildcard}; -use self::WildcardVersion::{Major, Minor, Patch}; -use self::ReqParseError::*; - -/// A `VersionReq` is a struct containing a list of predicates that can apply to ranges of version -/// numbers. Matching operations can then be done with the `VersionReq` against a particular -/// version to see if it satisfies some or all of the constraints. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub struct VersionReq { - predicates: Vec, -} - -impl From for VersionReq { - fn from(other: semver_parser::range::VersionReq) -> VersionReq { - VersionReq { predicates: other.predicates.into_iter().map(From::from).collect() } - } -} - -#[cfg(feature = "serde")] -impl Serialize for VersionReq { - fn serialize(&self, serializer: S) -> result::Result - where S: Serializer - { - // Serialize VersionReq as a string. - serializer.collect_str(self) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for VersionReq { - fn deserialize(deserializer: D) -> result::Result - where D: Deserializer<'de> - { - struct VersionReqVisitor; - - /// Deserialize `VersionReq` from a string. 
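The feature-gated `Serialize`/`Deserialize` impls being reworked here both go through the string form of the requirement. A minimal sketch of the round trip, assuming the crate is built with the optional `serde` feature and that `serde_json` is pulled in purely for illustration:

```rust
extern crate semver;
extern crate serde_json;

use semver::VersionReq;

fn main() {
    let req = VersionReq::parse("^1.2.3").unwrap();

    // Serialize VersionReq as a string, which is what collect_str does above.
    let json = serde_json::to_string(&req).unwrap();
    assert_eq!(json, "\"^1.2.3\"");

    // And back again through the Visitor-based Deserialize impl.
    let back: VersionReq = serde_json::from_str(&json).unwrap();
    assert_eq!(back, req);
}
```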
- impl<'de> Visitor<'de> for VersionReqVisitor { - type Value = VersionReq; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a SemVer version requirement as a string") - } - - fn visit_str(self, v: &str) -> result::Result - where E: de::Error - { - VersionReq::parse(v).map_err(de::Error::custom) - } - } - - deserializer.deserialize_str(VersionReqVisitor) - } -} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -enum WildcardVersion { - Major, - Minor, - Patch, -} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -enum Op { - Ex, // Exact - Gt, // Greater than - GtEq, // Greater than or equal to - Lt, // Less than - LtEq, // Less than or equal to - Tilde, // e.g. ~1.0.0 - Compatible, // compatible by definition of semver, indicated by ^ - Wildcard(WildcardVersion), // x.y.*, x.*, * -} - -impl From for Op { - fn from(other: semver_parser::range::Op) -> Op { - use semver_parser::range; - match other { - range::Op::Ex => Op::Ex, - range::Op::Gt => Op::Gt, - range::Op::GtEq => Op::GtEq, - range::Op::Lt => Op::Lt, - range::Op::LtEq => Op::LtEq, - range::Op::Tilde => Op::Tilde, - range::Op::Compatible => Op::Compatible, - range::Op::Wildcard(version) => { - match version { - range::WildcardVersion::Major => Op::Wildcard(WildcardVersion::Major), - range::WildcardVersion::Minor => Op::Wildcard(WildcardVersion::Minor), - range::WildcardVersion::Patch => Op::Wildcard(WildcardVersion::Patch), - } - } - } - } -} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -struct Predicate { - op: Op, - major: u64, - minor: Option, - patch: Option, - pre: Vec, -} - -impl From for Predicate { - fn from(other: semver_parser::range::Predicate) -> Predicate { - Predicate { - op: From::from(other.op), - major: other.major, - minor: other.minor, - patch: other.patch, - pre: other.pre.into_iter().map(From::from).collect(), - } - } -} - -/// A `ReqParseError` is returned from methods which parse a string into a `VersionReq`. Each -/// enumeration is one of the possible errors that can occur. -#[derive(Clone, Debug, PartialEq)] -pub enum ReqParseError { - /// The given version requirement is invalid. - InvalidVersionRequirement, - /// You have already provided an operation, such as `=`, `~`, or `^`. Only use one. - OpAlreadySet, - /// The sigil you have written is not correct. - InvalidSigil, - /// All components of a version must be numeric. - VersionComponentsMustBeNumeric, - /// There was an error parsing an identifier. - InvalidIdentifier, - /// At least a major version is required. - MajorVersionRequired, - /// An unimplemented version requirement. - UnimplementedVersionRequirement, - /// This form of requirement is deprecated. 
- DeprecatedVersionRequirement(VersionReq), -} - -impl fmt::Display for ReqParseError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.description().fmt(f) - } -} - -impl Error for ReqParseError { - fn description(&self) -> &str { - match self { - &InvalidVersionRequirement => "the given version requirement is invalid", - &OpAlreadySet => { - "you have already provided an operation, such as =, ~, or ^; only use one" - }, - &InvalidSigil => "the sigil you have written is not correct", - &VersionComponentsMustBeNumeric => "version components must be numeric", - &InvalidIdentifier => "invalid identifier", - &MajorVersionRequired => "at least a major version number is required", - &UnimplementedVersionRequirement => { - "the given version requirement is not implemented, yet" - }, - &DeprecatedVersionRequirement(_) => "This requirement is deprecated", - } - } -} - -impl From for ReqParseError { - fn from(other: String) -> ReqParseError { - match &*other { - "Null is not a valid VersionReq" => ReqParseError::InvalidVersionRequirement, - "VersionReq did not parse properly." => ReqParseError::OpAlreadySet, - _ => ReqParseError::InvalidVersionRequirement, - } - } -} - -impl VersionReq { - /// `any()` is a factory method which creates a `VersionReq` with no constraints. In other - /// words, any version will match against it. - /// - /// # Examples - /// - /// ``` - /// use semver::VersionReq; - /// - /// let anything = VersionReq::any(); - /// ``` - pub fn any() -> VersionReq { - VersionReq { predicates: vec![] } - } - - /// `parse()` is the main constructor of a `VersionReq`. It takes a string like `"^1.2.3"` - /// and turns it into a `VersionReq` that matches that particular constraint. - /// - /// A `Result` is returned which contains a `ReqParseError` if there was a problem parsing the - /// `VersionReq`. - /// - /// # Examples - /// - /// ``` - /// use semver::VersionReq; - /// - /// let version = VersionReq::parse("=1.2.3"); - /// let version = VersionReq::parse(">1.2.3"); - /// let version = VersionReq::parse("<1.2.3"); - /// let version = VersionReq::parse("~1.2.3"); - /// let version = VersionReq::parse("^1.2.3"); - /// let version = VersionReq::parse("1.2.3"); // synonym for ^1.2.3 - /// let version = VersionReq::parse("<=1.2.3"); - /// let version = VersionReq::parse(">=1.2.3"); - /// ``` - /// - /// This example demonstrates error handling, and will panic. - /// - /// ```should-panic - /// use semver::VersionReq; - /// - /// let version = match VersionReq::parse("not a version") { - /// Ok(version) => version, - /// Err(e) => panic!("There was a problem parsing: {}", e), - /// } - /// ``` - pub fn parse(input: &str) -> Result { - let res = semver_parser::range::parse(input); - - if let Ok(v) = res { - return Ok(From::from(v)); - } - - return match VersionReq::parse_deprecated(input) { - Some(v) => { - Err(ReqParseError::DeprecatedVersionRequirement(v)) - } - None => Err(From::from(res.err().unwrap())), - } - } - - fn parse_deprecated(version: &str) -> Option { - return match version { - ".*" => Some(VersionReq::any()), - "0.1.0." => Some(VersionReq::parse("0.1.0").unwrap()), - "0.3.1.3" => Some(VersionReq::parse("0.3.13").unwrap()), - "0.2*" => Some(VersionReq::parse("0.2.*").unwrap()), - "*.0" => Some(VersionReq::any()), - _ => None, - } - } - - /// `exact()` is a factory method which creates a `VersionReq` with one exact constraint. 
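As a concrete illustration of the `parse_deprecated` fallback above, a hedged sketch: it assumes, as the table implies, that `0.2*` is rejected by the regular parser and reported back with its modern spelling.

```rust
extern crate semver;

use semver::{ReqParseError, VersionReq};

fn main() {
    match VersionReq::parse("0.2*") {
        // The error carries the equivalent, non-deprecated requirement.
        Err(ReqParseError::DeprecatedVersionRequirement(fixed)) => {
            assert_eq!(fixed, VersionReq::parse("0.2.*").unwrap());
        }
        other => panic!("expected a deprecation error, got {:?}", other),
    }
}
```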
- /// - /// # Examples - /// - /// ``` - /// use semver::VersionReq; - /// use semver::Version; - /// - /// let version = Version { major: 1, minor: 1, patch: 1, pre: vec![], build: vec![] }; - /// let exact = VersionReq::exact(&version); - /// ``` - pub fn exact(version: &Version) -> VersionReq { - VersionReq { predicates: vec![Predicate::exact(version)] } - } - - /// `matches()` matches a given `Version` against this `VersionReq`. - /// - /// # Examples - /// - /// ``` - /// use semver::VersionReq; - /// use semver::Version; - /// - /// let version = Version { major: 1, minor: 1, patch: 1, pre: vec![], build: vec![] }; - /// let exact = VersionReq::exact(&version); - /// - /// assert!(exact.matches(&version)); - /// ``` - pub fn matches(&self, version: &Version) -> bool { - // no predicates means anything matches - if self.predicates.is_empty() { - return true; - } - - self.predicates.iter().all(|p| p.matches(version)) && - self.predicates.iter().any(|p| p.pre_tag_is_compatible(version)) - } -} - -impl str::FromStr for VersionReq { - type Err = ReqParseError; - - fn from_str(s: &str) -> Result { - VersionReq::parse(s) - } -} - -impl Predicate { - fn exact(version: &Version) -> Predicate { - Predicate { - op: Ex, - major: version.major, - minor: Some(version.minor), - patch: Some(version.patch), - pre: version.pre.clone(), - } - } - - /// `matches()` takes a `Version` and determines if it matches this particular `Predicate`. - pub fn matches(&self, ver: &Version) -> bool { - match self.op { - Ex => self.is_exact(ver), - Gt => self.is_greater(ver), - GtEq => self.is_exact(ver) || self.is_greater(ver), - Lt => !self.is_exact(ver) && !self.is_greater(ver), - LtEq => !self.is_greater(ver), - Tilde => self.matches_tilde(ver), - Compatible => self.is_compatible(ver), - Wildcard(_) => self.matches_wildcard(ver), - } - } - - fn is_exact(&self, ver: &Version) -> bool { - if self.major != ver.major { - return false; - } - - match self.minor { - Some(minor) => { - if minor != ver.minor { - return false; - } - } - None => return true, - } - - match self.patch { - Some(patch) => { - if patch != ver.patch { - return false; - } - } - None => return true, - } - - if self.pre != ver.pre { - return false; - } - - true - } - - // https://docs.npmjs.com/misc/semver#prerelease-tags - fn pre_tag_is_compatible(&self, ver: &Version) -> bool { - // If a version has a prerelease tag (for example, 1.2.3-alpha.3) then it will - // only be - // allowed to satisfy comparator sets if at least one comparator with the same - // [major, - // minor, patch] tuple also has a prerelease tag. 
- !ver.is_prerelease() || - (self.major == ver.major && self.minor == Some(ver.minor) && - self.patch == Some(ver.patch) && !self.pre.is_empty()) - } - - fn is_greater(&self, ver: &Version) -> bool { - if self.major != ver.major { - return ver.major > self.major; - } - - match self.minor { - Some(minor) => { - if minor != ver.minor { - return ver.minor > minor; - } - } - None => return false, - } - - match self.patch { - Some(patch) => { - if patch != ver.patch { - return ver.patch > patch; - } - } - None => return false, - } - - if !self.pre.is_empty() { - return ver.pre.is_empty() || ver.pre > self.pre; - } - - false - } - - // see https://www.npmjs.org/doc/misc/semver.html for behavior - fn matches_tilde(&self, ver: &Version) -> bool { - let minor = match self.minor { - Some(n) => n, - None => return self.major == ver.major, - }; - - match self.patch { - Some(patch) => { - self.major == ver.major && minor == ver.minor && - (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver))) - } - None => self.major == ver.major && minor == ver.minor, - } - } - - // see https://www.npmjs.org/doc/misc/semver.html for behavior - fn is_compatible(&self, ver: &Version) -> bool { - if self.major != ver.major { - return false; - } - - let minor = match self.minor { - Some(n) => n, - None => return self.major == ver.major, - }; - - match self.patch { - Some(patch) => { - if self.major == 0 { - if minor == 0 { - ver.minor == minor && ver.patch == patch && self.pre_is_compatible(ver) - } else { - ver.minor == minor && - (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver))) - } - } else { - ver.minor > minor || - (ver.minor == minor && - (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver)))) - } - } - None => { - if self.major == 0 { - ver.minor == minor - } else { - ver.minor >= minor - } - } - } - } - - fn pre_is_compatible(&self, ver: &Version) -> bool { - ver.pre.is_empty() || ver.pre >= self.pre - } - - // see https://www.npmjs.org/doc/misc/semver.html for behavior - fn matches_wildcard(&self, ver: &Version) -> bool { - match self.op { - Wildcard(Major) => true, - Wildcard(Minor) => self.major == ver.major, - Wildcard(Patch) => { - match self.minor { - Some(minor) => self.major == ver.major && minor == ver.minor, - None => { - // minor and patch version astericks mean match on major - self.major == ver.major - } - } - } - _ => false, // unreachable - } - } -} - -impl fmt::Display for VersionReq { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - if self.predicates.is_empty() { - try!(write!(fmt, "*")); - } else { - for (i, ref pred) in self.predicates.iter().enumerate() { - if i == 0 { - try!(write!(fmt, "{}", pred)); - } else { - try!(write!(fmt, ", {}", pred)); - } - } - } - - Ok(()) - } -} - -impl fmt::Display for Predicate { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match self.op { - Wildcard(Major) => try!(write!(fmt, "*")), - Wildcard(Minor) => try!(write!(fmt, "{}.*", self.major)), - Wildcard(Patch) => { - if let Some(minor) = self.minor { - try!(write!(fmt, "{}.{}.*", self.major, minor)) - } else { - try!(write!(fmt, "{}.*.*", self.major)) - } - } - _ => { - try!(write!(fmt, "{}{}", self.op, self.major)); - - match self.minor { - Some(v) => try!(write!(fmt, ".{}", v)), - None => (), - } - - match self.patch { - Some(v) => try!(write!(fmt, ".{}", v)), - None => (), - } - - if !self.pre.is_empty() { - try!(write!(fmt, "-")); - for (i, x) in self.pre.iter().enumerate() { - if i != 0 { - try!(write!(fmt, ".")) - 
} - try!(write!(fmt, "{}", x)); - } - } - } - } - - Ok(()) - } -} - -impl fmt::Display for Op { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - Ex => try!(write!(fmt, "= ")), - Gt => try!(write!(fmt, "> ")), - GtEq => try!(write!(fmt, ">= ")), - Lt => try!(write!(fmt, "< ")), - LtEq => try!(write!(fmt, "<= ")), - Tilde => try!(write!(fmt, "~")), - Compatible => try!(write!(fmt, "^")), - // gets handled specially in Predicate::fmt - Wildcard(_) => try!(write!(fmt, "")), - } - Ok(()) - } -} - -#[cfg(test)] -mod test { - use super::{VersionReq, Op}; - use super::super::version::Version; - use std::hash::{Hash, Hasher}; - - fn req(s: &str) -> VersionReq { - VersionReq::parse(s).unwrap() - } - - fn version(s: &str) -> Version { - match Version::parse(s) { - Ok(v) => v, - Err(e) => panic!("`{}` is not a valid version. Reason: {:?}", s, e), - } - } - - fn assert_match(req: &VersionReq, vers: &[&str]) { - for ver in vers.iter() { - assert!(req.matches(&version(*ver)), "did not match {}", ver); - } - } - - fn assert_not_match(req: &VersionReq, vers: &[&str]) { - for ver in vers.iter() { - assert!(!req.matches(&version(*ver)), "matched {}", ver); - } - } - - fn calculate_hash(t: T) -> u64 { - use std::collections::hash_map::DefaultHasher; - - let mut s = DefaultHasher::new(); - t.hash(&mut s); - s.finish() - } - - #[test] - fn test_parsing_default() { - let r = req("1.0.0"); - - assert_eq!(r.to_string(), "^1.0.0".to_string()); - - assert_match(&r, &["1.0.0", "1.0.1"]); - assert_not_match(&r, &["0.9.9", "0.10.0", "0.1.0"]); - } - - #[test] - fn test_parsing_exact() { - let r = req("=1.0.0"); - - assert!(r.to_string() == "= 1.0.0".to_string()); - assert_eq!(r.to_string(), "= 1.0.0".to_string()); - - assert_match(&r, &["1.0.0"]); - assert_not_match(&r, &["1.0.1", "0.9.9", "0.10.0", "0.1.0", "1.0.0-pre"]); - - let r = req("=0.9.0"); - - assert_eq!(r.to_string(), "= 0.9.0".to_string()); - - assert_match(&r, &["0.9.0"]); - assert_not_match(&r, &["0.9.1", "1.9.0", "0.0.9"]); - - let r = req("=0.1.0-beta2.a"); - - assert_eq!(r.to_string(), "= 0.1.0-beta2.a".to_string()); - - assert_match(&r, &["0.1.0-beta2.a"]); - assert_not_match(&r, &["0.9.1", "0.1.0", "0.1.1-beta2.a", "0.1.0-beta2"]); - } - - #[test] - fn test_parse_metadata_see_issue_88_see_issue_88() { - for op in &[Op::Compatible, Op::Ex, Op::Gt, Op::GtEq, Op::Lt, Op::LtEq, Op::Tilde] { - req(&format!("{} 1.2.3+meta", op)); - } - } - - #[test] - pub fn test_parsing_greater_than() { - let r = req(">= 1.0.0"); - - assert_eq!(r.to_string(), ">= 1.0.0".to_string()); - - assert_match(&r, &["1.0.0", "2.0.0"]); - assert_not_match(&r, &["0.1.0", "0.0.1", "1.0.0-pre", "2.0.0-pre"]); - - let r = req(">= 2.1.0-alpha2"); - - assert_match(&r, &["2.1.0-alpha2", "2.1.0-alpha3", "2.1.0", "3.0.0"]); - assert_not_match(&r, - &["2.0.0", "2.1.0-alpha1", "2.0.0-alpha2", "3.0.0-alpha2"]); - } - - #[test] - pub fn test_parsing_less_than() { - let r = req("< 1.0.0"); - - assert_eq!(r.to_string(), "< 1.0.0".to_string()); - - assert_match(&r, &["0.1.0", "0.0.1"]); - assert_not_match(&r, &["1.0.0", "1.0.0-beta", "1.0.1", "0.9.9-alpha"]); - - let r = req("<= 2.1.0-alpha2"); - - assert_match(&r, &["2.1.0-alpha2", "2.1.0-alpha1", "2.0.0", "1.0.0"]); - assert_not_match(&r, - &["2.1.0", "2.2.0-alpha1", "2.0.0-alpha2", "1.0.0-alpha2"]); - } - - #[test] - pub fn test_multiple() { - let r = req("> 0.0.9, <= 2.5.3"); - assert_eq!(r.to_string(), "> 0.0.9, <= 2.5.3".to_string()); - assert_match(&r, &["0.0.10", "1.0.0", "2.5.3"]); - assert_not_match(&r, 
&["0.0.8", "2.5.4"]); - - let r = req("0.3.0, 0.4.0"); - assert_eq!(r.to_string(), "^0.3.0, ^0.4.0".to_string()); - assert_not_match(&r, &["0.0.8", "0.3.0", "0.4.0"]); - - let r = req("<= 0.2.0, >= 0.5.0"); - assert_eq!(r.to_string(), "<= 0.2.0, >= 0.5.0".to_string()); - assert_not_match(&r, &["0.0.8", "0.3.0", "0.5.1"]); - - let r = req("0.1.0, 0.1.4, 0.1.6"); - assert_eq!(r.to_string(), "^0.1.0, ^0.1.4, ^0.1.6".to_string()); - assert_match(&r, &["0.1.6", "0.1.9"]); - assert_not_match(&r, &["0.1.0", "0.1.4", "0.2.0"]); - - assert!(VersionReq::parse("> 0.1.0,").is_err()); - assert!(VersionReq::parse("> 0.3.0, ,").is_err()); - - let r = req(">=0.5.1-alpha3, <0.6"); - assert_eq!(r.to_string(), ">= 0.5.1-alpha3, < 0.6".to_string()); - assert_match(&r, - &["0.5.1-alpha3", "0.5.1-alpha4", "0.5.1-beta", "0.5.1", "0.5.5"]); - assert_not_match(&r, - &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre"]); - assert_not_match(&r, &["0.6.0", "0.6.0-pre"]); - } - - #[test] - pub fn test_parsing_tilde() { - let r = req("~1"); - assert_match(&r, &["1.0.0", "1.0.1", "1.1.1"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "0.0.9"]); - - let r = req("~1.2"); - assert_match(&r, &["1.2.0", "1.2.1"]); - assert_not_match(&r, &["1.1.1", "1.3.0", "0.0.9"]); - - let r = req("~1.2.2"); - assert_match(&r, &["1.2.2", "1.2.4"]); - assert_not_match(&r, &["1.2.1", "1.9.0", "1.0.9", "2.0.1", "0.1.3"]); - - let r = req("~1.2.3-beta.2"); - assert_match(&r, &["1.2.3", "1.2.4", "1.2.3-beta.2", "1.2.3-beta.4"]); - assert_not_match(&r, &["1.3.3", "1.1.4", "1.2.3-beta.1", "1.2.4-beta.2"]); - } - - #[test] - pub fn test_parsing_compatible() { - let r = req("^1"); - assert_match(&r, &["1.1.2", "1.1.0", "1.2.1", "1.0.1"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "0.1.4"]); - assert_not_match(&r, &["1.0.0-beta1", "0.1.0-alpha", "1.0.1-pre"]); - - let r = req("^1.1"); - assert_match(&r, &["1.1.2", "1.1.0", "1.2.1"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "1.0.1", "0.1.4"]); - - let r = req("^1.1.2"); - assert_match(&r, &["1.1.2", "1.1.4", "1.2.1"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]); - assert_not_match(&r, &["1.1.2-alpha1", "1.1.3-alpha1", "2.9.0-alpha1"]); - - let r = req("^0.1.2"); - assert_match(&r, &["0.1.2", "0.1.4"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]); - assert_not_match(&r, &["0.1.2-beta", "0.1.3-alpha", "0.2.0-pre"]); - - let r = req("^0.5.1-alpha3"); - assert_match(&r, - &["0.5.1-alpha3", "0.5.1-alpha4", "0.5.1-beta", "0.5.1", "0.5.5"]); - assert_not_match(&r, - &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre", "0.6.0"]); - - let r = req("^0.0.2"); - assert_match(&r, &["0.0.2"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1", "0.1.4"]); - - let r = req("^0.0"); - assert_match(&r, &["0.0.2", "0.0.0"]); - assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.1.4"]); - - let r = req("^0"); - assert_match(&r, &["0.9.1", "0.0.2", "0.0.0"]); - assert_not_match(&r, &["2.9.0", "1.1.1"]); - - let r = req("^1.4.2-beta.5"); - assert_match(&r, - &["1.4.2", "1.4.3", "1.4.2-beta.5", "1.4.2-beta.6", "1.4.2-c"]); - assert_not_match(&r, - &["0.9.9", "2.0.0", "1.4.2-alpha", "1.4.2-beta.4", "1.4.3-beta.5"]); - } - - #[test] - pub fn test_parsing_wildcard() { - let r = req(""); - assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); - assert_not_match(&r, &[]); - let r = req("*"); - assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); - assert_not_match(&r, &[]); - let r = req("x"); - assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", 
"1.0.1", "1.1.1"]); - assert_not_match(&r, &[]); - let r = req("X"); - assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); - assert_not_match(&r, &[]); - - let r = req("1.*"); - assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); - assert_not_match(&r, &["0.0.9"]); - let r = req("1.x"); - assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); - assert_not_match(&r, &["0.0.9"]); - let r = req("1.X"); - assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); - assert_not_match(&r, &["0.0.9"]); - - let r = req("1.2.*"); - assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]); - assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]); - let r = req("1.2.x"); - assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]); - assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]); - let r = req("1.2.X"); - assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]); - assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]); - } - - #[test] - pub fn test_any() { - let r = VersionReq::any(); - assert_match(&r, &["0.0.1", "0.1.0", "1.0.0"]); - } - - #[test] - pub fn test_pre() { - let r = req("=2.1.1-really.0"); - assert_match(&r, &["2.1.1-really.0"]); - } - - // #[test] - // pub fn test_parse_errors() { - // assert_eq!(Err(InvalidVersionRequirement), VersionReq::parse("\0")); - // assert_eq!(Err(OpAlreadySet), VersionReq::parse(">= >= 0.0.2")); - // assert_eq!(Err(InvalidSigil), VersionReq::parse(">== 0.0.2")); - // assert_eq!(Err(VersionComponentsMustBeNumeric), - // VersionReq::parse("a.0.0")); - // assert_eq!(Err(InvalidIdentifier), VersionReq::parse("1.0.0-")); - // assert_eq!(Err(MajorVersionRequired), VersionReq::parse(">=")); - // } - - #[test] - pub fn test_from_str() { - assert_eq!("1.0.0".parse::().unwrap().to_string(), - "^1.0.0".to_string()); - assert_eq!("=1.0.0".parse::().unwrap().to_string(), - "= 1.0.0".to_string()); - assert_eq!("~1".parse::().unwrap().to_string(), - "~1".to_string()); - assert_eq!("~1.2".parse::().unwrap().to_string(), - "~1.2".to_string()); - assert_eq!("^1".parse::().unwrap().to_string(), - "^1".to_string()); - assert_eq!("^1.1".parse::().unwrap().to_string(), - "^1.1".to_string()); - assert_eq!("*".parse::().unwrap().to_string(), - "*".to_string()); - assert_eq!("1.*".parse::().unwrap().to_string(), - "1.*".to_string()); - assert_eq!("< 1.0.0".parse::().unwrap().to_string(), - "< 1.0.0".to_string()); - } - - // #[test] - // pub fn test_from_str_errors() { - // assert_eq!(Err(InvalidVersionRequirement), "\0".parse::()); - // assert_eq!(Err(OpAlreadySet), ">= >= 0.0.2".parse::()); - // assert_eq!(Err(InvalidSigil), ">== 0.0.2".parse::()); - // assert_eq!(Err(VersionComponentsMustBeNumeric), - // "a.0.0".parse::()); - // assert_eq!(Err(InvalidIdentifier), "1.0.0-".parse::()); - // assert_eq!(Err(MajorVersionRequired), ">=".parse::()); - // } - - #[test] - fn test_cargo3202() { - let v = "0.*.*".parse::().unwrap(); - assert_eq!("0.*.*", format!("{}", v.predicates[0])); - - let v = "0.0.*".parse::().unwrap(); - assert_eq!("0.0.*", format!("{}", v.predicates[0])); - - let r = req("0.*.*"); - assert_match(&r, &["0.5.0"]); - } - - #[test] - fn test_eq_hash() { - assert!(req("^1") == req("^1")); - assert!(calculate_hash(req("^1")) == calculate_hash(req("^1"))); - assert!(req("^1") != req("^2")); - } - - #[test] - fn test_ordering() { - assert!(req("=1") < req("*")); - assert!(req(">1") < req("*")); - assert!(req(">=1") < req("*")); - assert!(req("<1") < req("*")); - assert!(req("<=1") < req("*")); - assert!(req("~1") < req("*")); - assert!(req("^1") < req("*")); - 
assert!(req("*") == req("*")); - } -} +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::error::Error; +use std::fmt; +use std::str; + +use semver_parser; +use version::Identifier; +use Version; + +#[cfg(feature = "serde")] +use serde::de::{self, Deserialize, Deserializer, Visitor}; +#[cfg(feature = "serde")] +use serde::ser::{Serialize, Serializer}; + +use self::Op::{Compatible, Ex, Gt, GtEq, Lt, LtEq, Tilde, Wildcard}; +use self::ReqParseError::*; +use self::WildcardVersion::{Major, Minor, Patch}; + +/// A `VersionReq` is a struct containing a list of predicates that can apply to ranges of version +/// numbers. Matching operations can then be done with the `VersionReq` against a particular +/// version to see if it satisfies some or all of the constraints. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +#[cfg_attr(feature = "diesel", derive(AsExpression, FromSqlRow))] +#[cfg_attr(feature = "diesel", sql_type = "diesel::sql_types::Text")] +pub struct VersionReq { + predicates: Vec, +} + +impl From for VersionReq { + fn from(other: semver_parser::range::VersionReq) -> VersionReq { + VersionReq { + predicates: other.predicates.into_iter().map(From::from).collect(), + } + } +} + +#[cfg(feature = "serde")] +impl Serialize for VersionReq { + fn serialize(&self, serializer: S) -> ::std::result::Result + where + S: Serializer, + { + // Serialize VersionReq as a string. + serializer.collect_str(self) + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for VersionReq { + fn deserialize(deserializer: D) -> ::std::result::Result + where + D: Deserializer<'de>, + { + struct VersionReqVisitor; + + /// Deserialize `VersionReq` from a string. + impl<'de> Visitor<'de> for VersionReqVisitor { + type Value = VersionReq; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a SemVer version requirement as a string") + } + + fn visit_str(self, v: &str) -> ::std::result::Result + where + E: de::Error, + { + VersionReq::parse(v).map_err(de::Error::custom) + } + } + + deserializer.deserialize_str(VersionReqVisitor) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +enum WildcardVersion { + Major, + Minor, + Patch, +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +enum Op { + Ex, // Exact + Gt, // Greater than + GtEq, // Greater than or equal to + Lt, // Less than + LtEq, // Less than or equal to + Tilde, // e.g. 
~1.0.0 + Compatible, // compatible by definition of semver, indicated by ^ + Wildcard(WildcardVersion), // x.y.*, x.*, * +} + +impl From for Op { + fn from(other: semver_parser::range::Op) -> Op { + use semver_parser::range; + match other { + range::Op::Ex => Op::Ex, + range::Op::Gt => Op::Gt, + range::Op::GtEq => Op::GtEq, + range::Op::Lt => Op::Lt, + range::Op::LtEq => Op::LtEq, + range::Op::Tilde => Op::Tilde, + range::Op::Compatible => Op::Compatible, + range::Op::Wildcard(version) => match version { + range::WildcardVersion::Major => Op::Wildcard(WildcardVersion::Major), + range::WildcardVersion::Minor => Op::Wildcard(WildcardVersion::Minor), + range::WildcardVersion::Patch => Op::Wildcard(WildcardVersion::Patch), + }, + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +struct Predicate { + op: Op, + major: u64, + minor: Option, + patch: Option, + pre: Vec, +} + +impl From for Predicate { + fn from(other: semver_parser::range::Predicate) -> Predicate { + Predicate { + op: From::from(other.op), + major: other.major, + minor: other.minor, + patch: other.patch, + pre: other.pre.into_iter().map(From::from).collect(), + } + } +} + +/// A `ReqParseError` is returned from methods which parse a string into a [`VersionReq`]. Each +/// enumeration is one of the possible errors that can occur. +/// [`VersionReq`]: struct.VersionReq.html +#[derive(Clone, Debug, PartialEq)] +pub enum ReqParseError { + /// The given version requirement is invalid. + InvalidVersionRequirement, + /// You have already provided an operation, such as `=`, `~`, or `^`. Only use one. + OpAlreadySet, + /// The sigil you have written is not correct. + InvalidSigil, + /// All components of a version must be numeric. + VersionComponentsMustBeNumeric, + /// There was an error parsing an identifier. + InvalidIdentifier, + /// At least a major version is required. + MajorVersionRequired, + /// An unimplemented version requirement. + UnimplementedVersionRequirement, + /// This form of requirement is deprecated. + DeprecatedVersionRequirement(VersionReq), +} + +impl fmt::Display for ReqParseError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let msg = match self { + InvalidVersionRequirement => "the given version requirement is invalid", + OpAlreadySet => { + "you have already provided an operation, such as =, ~, or ^; only use one" + } + InvalidSigil => "the sigil you have written is not correct", + VersionComponentsMustBeNumeric => "version components must be numeric", + InvalidIdentifier => "invalid identifier", + MajorVersionRequired => "at least a major version number is required", + UnimplementedVersionRequirement => { + "the given version requirement is not implemented, yet" + } + DeprecatedVersionRequirement(_) => "This requirement is deprecated", + }; + msg.fmt(f) + } +} + +impl Error for ReqParseError {} + +impl From for ReqParseError { + fn from(other: String) -> ReqParseError { + match &*other { + "Null is not a valid VersionReq" => ReqParseError::InvalidVersionRequirement, + "VersionReq did not parse properly." => ReqParseError::OpAlreadySet, + _ => ReqParseError::InvalidVersionRequirement, + } + } +} + +impl VersionReq { + /// `any()` is a factory method which creates a `VersionReq` with no constraints. In other + /// words, any version will match against it. 
+ /// + /// # Examples + /// + /// ``` + /// use semver::VersionReq; + /// + /// let anything = VersionReq::any(); + /// ``` + pub fn any() -> VersionReq { + VersionReq { predicates: vec![] } + } + + /// `parse()` is the main constructor of a `VersionReq`. It takes a string like `"^1.2.3"` + /// and turns it into a `VersionReq` that matches that particular constraint. + /// + /// A `Result` is returned which contains a [`ReqParseError`] if there was a problem parsing the + /// `VersionReq`. + /// [`ReqParseError`]: enum.ReqParseError.html + /// + /// # Examples + /// + /// ``` + /// use semver::VersionReq; + /// + /// let version = VersionReq::parse("=1.2.3"); + /// let version = VersionReq::parse(">1.2.3"); + /// let version = VersionReq::parse("<1.2.3"); + /// let version = VersionReq::parse("~1.2.3"); + /// let version = VersionReq::parse("^1.2.3"); + /// let version = VersionReq::parse("1.2.3"); // synonym for ^1.2.3 + /// let version = VersionReq::parse("<=1.2.3"); + /// let version = VersionReq::parse(">=1.2.3"); + /// ``` + /// + /// This example demonstrates error handling, and will panic. + /// + /// ```should_panic + /// use semver::VersionReq; + /// + /// let version = match VersionReq::parse("not a version") { + /// Ok(version) => version, + /// Err(e) => panic!("There was a problem parsing: {}", e), + /// }; + /// ``` + /// + /// # Errors + /// + /// Returns an error variant if the input could not be parsed as a semver requirement. + /// + /// Examples of common error causes are as follows: + /// + /// * `\0` - an invalid version requirement is used. + /// * `>= >= 1.2.3` - multiple operations are used. Only use one. + /// * `>== 1.2.3` - an invalid operation is used. + /// * `a.0.0` - version components are not numeric. + /// * `1.2.3-` - an invalid identifier is present. + /// * `>=` - major version was not specified. At least a major version is required. + /// * `0.2*` - deprecated requirement syntax. Equivalent would be `0.2.*`. + /// + /// You may also encounter an `UnimplementedVersionRequirement` error, which indicates that a + /// given requirement syntax is not yet implemented in this crate. + pub fn parse(input: &str) -> Result { + let res = semver_parser::range::parse(input); + + if let Ok(v) = res { + return Ok(From::from(v)); + } + + match VersionReq::parse_deprecated(input) { + Some(v) => Err(ReqParseError::DeprecatedVersionRequirement(v)), + None => Err(From::from(res.err().unwrap())), + } + } + + fn parse_deprecated(version: &str) -> Option { + match version { + ".*" => Some(VersionReq::any()), + "0.1.0." => Some(VersionReq::parse("0.1.0").unwrap()), + "0.3.1.3" => Some(VersionReq::parse("0.3.13").unwrap()), + "0.2*" => Some(VersionReq::parse("0.2.*").unwrap()), + "*.0" => Some(VersionReq::any()), + _ => None, + } + } + + /// `exact()` is a factory method which creates a `VersionReq` with one exact constraint. + /// + /// # Examples + /// + /// ``` + /// use semver::VersionReq; + /// use semver::Version; + /// + /// let version = Version { major: 1, minor: 1, patch: 1, pre: vec![], build: vec![] }; + /// let exact = VersionReq::exact(&version); + /// ``` + pub fn exact(version: &Version) -> VersionReq { + VersionReq { + predicates: vec![Predicate::exact(version)], + } + } + + /// `matches()` matches a given [`Version`] against this `VersionReq`. 
+ /// [`Version`]: struct.Version.html + /// + /// # Examples + /// + /// ``` + /// use semver::VersionReq; + /// use semver::Version; + /// + /// let version = Version { major: 1, minor: 1, patch: 1, pre: vec![], build: vec![] }; + /// let exact = VersionReq::exact(&version); + /// + /// assert!(exact.matches(&version)); + /// ``` + pub fn matches(&self, version: &Version) -> bool { + // no predicates means anything matches + if self.predicates.is_empty() { + return true; + } + + self.predicates.iter().all(|p| p.matches(version)) + && self + .predicates + .iter() + .any(|p| p.pre_tag_is_compatible(version)) + } + + /// `is_exact()` returns `true` if there is exactly one version which could match this + /// `VersionReq`. If `false` is returned, it is possible that there may still only be exactly + /// one version which could match this `VersionReq`. This function is intended do allow + /// short-circuiting more complex logic where being able to handle only the possibility of a + /// single exact version may be cheaper. + /// + /// # Examples + /// + /// ``` + /// use semver::ReqParseError; + /// use semver::VersionReq; + /// + /// fn use_is_exact() -> Result<(), ReqParseError> { + /// assert!(VersionReq::parse("=1.0.0")?.is_exact()); + /// assert!(!VersionReq::parse("=1.0")?.is_exact()); + /// assert!(!VersionReq::parse(">=1.0.0")?.is_exact()); + /// Ok(()) + /// } + /// + /// use_is_exact().unwrap(); + /// ``` + pub fn is_exact(&self) -> bool { + if let [predicate] = self.predicates.as_slice() { + predicate.has_exactly_one_match() + } else { + false + } + } +} + +impl str::FromStr for VersionReq { + type Err = ReqParseError; + + fn from_str(s: &str) -> Result { + VersionReq::parse(s) + } +} + +impl Predicate { + fn exact(version: &Version) -> Predicate { + Predicate { + op: Ex, + major: version.major, + minor: Some(version.minor), + patch: Some(version.patch), + pre: version.pre.clone(), + } + } + + /// `matches()` takes a `Version` and determines if it matches this particular `Predicate`. + pub fn matches(&self, ver: &Version) -> bool { + match self.op { + Ex => self.matches_exact(ver), + Gt => self.matches_greater(ver), + GtEq => self.matches_exact(ver) || self.matches_greater(ver), + Lt => !self.matches_exact(ver) && !self.matches_greater(ver), + LtEq => !self.matches_greater(ver), + Tilde => self.matches_tilde(ver), + Compatible => self.is_compatible(ver), + Wildcard(_) => self.matches_wildcard(ver), + } + } + + fn matches_exact(&self, ver: &Version) -> bool { + if self.major != ver.major { + return false; + } + + match self.minor { + Some(minor) => { + if minor != ver.minor { + return false; + } + } + None => return true, + } + + match self.patch { + Some(patch) => { + if patch != ver.patch { + return false; + } + } + None => return true, + } + + if self.pre != ver.pre { + return false; + } + + true + } + + // https://docs.npmjs.com/misc/semver#prerelease-tags + fn pre_tag_is_compatible(&self, ver: &Version) -> bool { + // If a version has a prerelease tag (for example, 1.2.3-alpha.3) then it will + // only be + // allowed to satisfy comparator sets if at least one comparator with the same + // [major, + // minor, patch] tuple also has a prerelease tag. 
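To make the pre-release rule described in that comment concrete, here is a short sketch of the observable behaviour through the public API, mirroring cases from the tests further down:

```rust
extern crate semver;

use semver::{Version, VersionReq};

fn main() {
    // A pre-release only satisfies a requirement when at least one comparator
    // with the same major.minor.patch tuple itself carries a pre-release tag.
    let plain = VersionReq::parse("^1.2.3").unwrap();
    assert!(!plain.matches(&Version::parse("1.2.4-alpha.1").unwrap()));

    let with_pre = VersionReq::parse("^1.2.3-alpha.1").unwrap();
    assert!(with_pre.matches(&Version::parse("1.2.3-alpha.2").unwrap()));

    // A different patch level does not opt in to pre-releases.
    assert!(!with_pre.matches(&Version::parse("1.2.4-alpha.1").unwrap()));
}
```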
+ !ver.is_prerelease() + || (self.major == ver.major + && self.minor == Some(ver.minor) + && self.patch == Some(ver.patch) + && !self.pre.is_empty()) + } + + fn matches_greater(&self, ver: &Version) -> bool { + if self.major != ver.major { + return ver.major > self.major; + } + + match self.minor { + Some(minor) => { + if minor != ver.minor { + return ver.minor > minor; + } + } + None => return false, + } + + match self.patch { + Some(patch) => { + if patch != ver.patch { + return ver.patch > patch; + } + } + None => return false, + } + + if !self.pre.is_empty() { + return ver.pre.is_empty() || ver.pre > self.pre; + } + + false + } + + // see https://www.npmjs.com/package/semver for behavior + fn matches_tilde(&self, ver: &Version) -> bool { + let minor = match self.minor { + Some(n) => n, + None => return self.major == ver.major, + }; + + match self.patch { + Some(patch) => { + self.major == ver.major + && minor == ver.minor + && (ver.patch > patch || (ver.patch == patch && self.pre_is_compatible(ver))) + } + None => self.major == ver.major && minor == ver.minor, + } + } + + // see https://www.npmjs.com/package/semver for behavior + fn is_compatible(&self, ver: &Version) -> bool { + if self.major != ver.major { + return false; + } + + let minor = match self.minor { + Some(n) => n, + None => return self.major == ver.major, + }; + + match self.patch { + Some(patch) => { + if self.major == 0 { + if minor == 0 { + ver.minor == minor && ver.patch == patch && self.pre_is_compatible(ver) + } else { + ver.minor == minor + && (ver.patch > patch + || (ver.patch == patch && self.pre_is_compatible(ver))) + } + } else { + ver.minor > minor + || (ver.minor == minor + && (ver.patch > patch + || (ver.patch == patch && self.pre_is_compatible(ver)))) + } + } + None => { + if self.major == 0 { + ver.minor == minor + } else { + ver.minor >= minor + } + } + } + } + + fn pre_is_compatible(&self, ver: &Version) -> bool { + ver.pre.is_empty() || ver.pre >= self.pre + } + + // see https://www.npmjs.com/package/semver for behavior + fn matches_wildcard(&self, ver: &Version) -> bool { + match self.op { + Wildcard(Major) => true, + Wildcard(Minor) => self.major == ver.major, + Wildcard(Patch) => { + match self.minor { + Some(minor) => self.major == ver.major && minor == ver.minor, + None => { + // minor and patch version astericks mean match on major + self.major == ver.major + } + } + } + _ => false, // unreachable + } + } + + fn has_exactly_one_match(&self) -> bool { + self.op == Ex && self.minor.is_some() && self.patch.is_some() + } +} + +impl fmt::Display for VersionReq { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + if self.predicates.is_empty() { + write!(fmt, "*")?; + } else { + for (i, ref pred) in self.predicates.iter().enumerate() { + if i == 0 { + write!(fmt, "{}", pred)?; + } else { + write!(fmt, ", {}", pred)?; + } + } + } + + Ok(()) + } +} + +impl fmt::Display for Predicate { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self.op { + Wildcard(Major) => write!(fmt, "*")?, + Wildcard(Minor) => write!(fmt, "{}.*", self.major)?, + Wildcard(Patch) => { + if let Some(minor) = self.minor { + write!(fmt, "{}.{}.*", self.major, minor)? + } else { + write!(fmt, "{}.*.*", self.major)? 
+ } + } + _ => { + write!(fmt, "{}{}", self.op, self.major)?; + + if let Some(v) = self.minor { + write!(fmt, ".{}", v)?; + } + + if let Some(v) = self.patch { + write!(fmt, ".{}", v)?; + } + + if !self.pre.is_empty() { + write!(fmt, "-")?; + for (i, x) in self.pre.iter().enumerate() { + if i != 0 { + write!(fmt, ".")? + } + write!(fmt, "{}", x)?; + } + } + } + } + + Ok(()) + } +} + +impl fmt::Display for Op { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + Ex => write!(fmt, "=")?, + Gt => write!(fmt, ">")?, + GtEq => write!(fmt, ">=")?, + Lt => write!(fmt, "<")?, + LtEq => write!(fmt, "<=")?, + Tilde => write!(fmt, "~")?, + Compatible => write!(fmt, "^")?, + // gets handled specially in Predicate::fmt + Wildcard(_) => write!(fmt, "")?, + } + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::super::version::Version; + use super::{Op, VersionReq}; + use std::hash::{Hash, Hasher}; + + fn req(s: &str) -> VersionReq { + VersionReq::parse(s).unwrap() + } + + fn version(s: &str) -> Version { + match Version::parse(s) { + Ok(v) => v, + Err(e) => panic!("`{}` is not a valid version. Reason: {:?}", s, e), + } + } + + fn assert_match(req: &VersionReq, vers: &[&str]) { + for ver in vers.iter() { + assert!(req.matches(&version(*ver)), "did not match {}", ver); + } + } + + fn assert_not_match(req: &VersionReq, vers: &[&str]) { + for ver in vers.iter() { + assert!(!req.matches(&version(*ver)), "matched {}", ver); + } + } + + fn calculate_hash(t: T) -> u64 { + use std::collections::hash_map::DefaultHasher; + + let mut s = DefaultHasher::new(); + t.hash(&mut s); + s.finish() + } + + #[test] + fn test_parsing_default() { + let r = req("1.0.0"); + + assert_eq!(r.to_string(), "^1.0.0".to_string()); + + assert_match(&r, &["1.0.0", "1.0.1"]); + assert_not_match(&r, &["0.9.9", "0.10.0", "0.1.0"]); + } + + #[test] + fn test_parsing_exact() { + let r = req("=1.0.0"); + + assert!(r.to_string() == "=1.0.0".to_string()); + assert_eq!(r.to_string(), "=1.0.0".to_string()); + + assert_match(&r, &["1.0.0"]); + assert_not_match(&r, &["1.0.1", "0.9.9", "0.10.0", "0.1.0", "1.0.0-pre"]); + + let r = req("=0.9.0"); + + assert_eq!(r.to_string(), "=0.9.0".to_string()); + + assert_match(&r, &["0.9.0"]); + assert_not_match(&r, &["0.9.1", "1.9.0", "0.0.9"]); + + let r = req("=0.1.0-beta2.a"); + + assert_eq!(r.to_string(), "=0.1.0-beta2.a".to_string()); + + assert_match(&r, &["0.1.0-beta2.a"]); + assert_not_match(&r, &["0.9.1", "0.1.0", "0.1.1-beta2.a", "0.1.0-beta2"]); + } + + #[test] + fn test_parse_metadata_see_issue_88_see_issue_88() { + for op in &[ + Op::Compatible, + Op::Ex, + Op::Gt, + Op::GtEq, + Op::Lt, + Op::LtEq, + Op::Tilde, + ] { + req(&format!("{} 1.2.3+meta", op)); + } + } + + #[test] + pub fn test_parsing_greater_than() { + let r = req(">= 1.0.0"); + + assert_eq!(r.to_string(), ">=1.0.0".to_string()); + + assert_match(&r, &["1.0.0", "2.0.0"]); + assert_not_match(&r, &["0.1.0", "0.0.1", "1.0.0-pre", "2.0.0-pre"]); + + let r = req(">= 2.1.0-alpha2"); + + assert_match(&r, &["2.1.0-alpha2", "2.1.0-alpha3", "2.1.0", "3.0.0"]); + assert_not_match( + &r, + &["2.0.0", "2.1.0-alpha1", "2.0.0-alpha2", "3.0.0-alpha2"], + ); + } + + #[test] + pub fn test_parsing_less_than() { + let r = req("< 1.0.0"); + + assert_eq!(r.to_string(), "<1.0.0".to_string()); + + assert_match(&r, &["0.1.0", "0.0.1"]); + assert_not_match(&r, &["1.0.0", "1.0.0-beta", "1.0.1", "0.9.9-alpha"]); + + let r = req("<= 2.1.0-alpha2"); + + assert_match(&r, &["2.1.0-alpha2", "2.1.0-alpha1", "2.0.0", "1.0.0"]); + 
assert_not_match( + &r, + &["2.1.0", "2.2.0-alpha1", "2.0.0-alpha2", "1.0.0-alpha2"], + ); + } + + #[test] + pub fn test_multiple() { + let r = req("> 0.0.9, <= 2.5.3"); + assert_eq!(r.to_string(), ">0.0.9, <=2.5.3".to_string()); + assert_match(&r, &["0.0.10", "1.0.0", "2.5.3"]); + assert_not_match(&r, &["0.0.8", "2.5.4"]); + + let r = req("0.3.0, 0.4.0"); + assert_eq!(r.to_string(), "^0.3.0, ^0.4.0".to_string()); + assert_not_match(&r, &["0.0.8", "0.3.0", "0.4.0"]); + + let r = req("<= 0.2.0, >= 0.5.0"); + assert_eq!(r.to_string(), "<=0.2.0, >=0.5.0".to_string()); + assert_not_match(&r, &["0.0.8", "0.3.0", "0.5.1"]); + + let r = req("0.1.0, 0.1.4, 0.1.6"); + assert_eq!(r.to_string(), "^0.1.0, ^0.1.4, ^0.1.6".to_string()); + assert_match(&r, &["0.1.6", "0.1.9"]); + assert_not_match(&r, &["0.1.0", "0.1.4", "0.2.0"]); + + assert!(VersionReq::parse("> 0.1.0,").is_err()); + assert!(VersionReq::parse("> 0.3.0, ,").is_err()); + + let r = req(">=0.5.1-alpha3, <0.6"); + assert_eq!(r.to_string(), ">=0.5.1-alpha3, <0.6".to_string()); + assert_match( + &r, + &[ + "0.5.1-alpha3", + "0.5.1-alpha4", + "0.5.1-beta", + "0.5.1", + "0.5.5", + ], + ); + assert_not_match( + &r, + &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre"], + ); + assert_not_match(&r, &["0.6.0", "0.6.0-pre"]); + } + + #[test] + pub fn test_parsing_tilde() { + let r = req("~1"); + assert_match(&r, &["1.0.0", "1.0.1", "1.1.1"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "0.0.9"]); + + let r = req("~1.2"); + assert_match(&r, &["1.2.0", "1.2.1"]); + assert_not_match(&r, &["1.1.1", "1.3.0", "0.0.9"]); + + let r = req("~1.2.2"); + assert_match(&r, &["1.2.2", "1.2.4"]); + assert_not_match(&r, &["1.2.1", "1.9.0", "1.0.9", "2.0.1", "0.1.3"]); + + let r = req("~1.2.3-beta.2"); + assert_match(&r, &["1.2.3", "1.2.4", "1.2.3-beta.2", "1.2.3-beta.4"]); + assert_not_match(&r, &["1.3.3", "1.1.4", "1.2.3-beta.1", "1.2.4-beta.2"]); + } + + #[test] + pub fn test_parsing_compatible() { + let r = req("^1"); + assert_match(&r, &["1.1.2", "1.1.0", "1.2.1", "1.0.1"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "0.1.4"]); + assert_not_match(&r, &["1.0.0-beta1", "0.1.0-alpha", "1.0.1-pre"]); + + let r = req("^1.1"); + assert_match(&r, &["1.1.2", "1.1.0", "1.2.1"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "1.0.1", "0.1.4"]); + + let r = req("^1.1.2"); + assert_match(&r, &["1.1.2", "1.1.4", "1.2.1"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]); + assert_not_match(&r, &["1.1.2-alpha1", "1.1.3-alpha1", "2.9.0-alpha1"]); + + let r = req("^0.1.2"); + assert_match(&r, &["0.1.2", "0.1.4"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]); + assert_not_match(&r, &["0.1.2-beta", "0.1.3-alpha", "0.2.0-pre"]); + + let r = req("^0.5.1-alpha3"); + assert_match( + &r, + &[ + "0.5.1-alpha3", + "0.5.1-alpha4", + "0.5.1-beta", + "0.5.1", + "0.5.5", + ], + ); + assert_not_match( + &r, + &[ + "0.5.1-alpha1", + "0.5.2-alpha3", + "0.5.5-pre", + "0.5.0-pre", + "0.6.0", + ], + ); + + let r = req("^0.0.2"); + assert_match(&r, &["0.0.2"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1", "0.1.4"]); + + let r = req("^0.0"); + assert_match(&r, &["0.0.2", "0.0.0"]); + assert_not_match(&r, &["0.9.1", "2.9.0", "1.1.1", "0.1.4"]); + + let r = req("^0"); + assert_match(&r, &["0.9.1", "0.0.2", "0.0.0"]); + assert_not_match(&r, &["2.9.0", "1.1.1"]); + + let r = req("^1.4.2-beta.5"); + assert_match( + &r, + &["1.4.2", "1.4.3", "1.4.2-beta.5", "1.4.2-beta.6", "1.4.2-c"], + ); + assert_not_match( + &r, + &[ + "0.9.9", + "2.0.0", + 
"1.4.2-alpha", + "1.4.2-beta.4", + "1.4.3-beta.5", + ], + ); + } + + #[test] + pub fn test_parsing_wildcard() { + let r = req(""); + assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); + assert_not_match(&r, &[]); + let r = req("*"); + assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); + assert_not_match(&r, &[]); + let r = req("x"); + assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); + assert_not_match(&r, &[]); + let r = req("X"); + assert_match(&r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); + assert_not_match(&r, &[]); + + let r = req("1.*"); + assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); + assert_not_match(&r, &["0.0.9"]); + let r = req("1.x"); + assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); + assert_not_match(&r, &["0.0.9"]); + let r = req("1.X"); + assert_match(&r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); + assert_not_match(&r, &["0.0.9"]); + + let r = req("1.2.*"); + assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]); + assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]); + let r = req("1.2.x"); + assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]); + assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]); + let r = req("1.2.X"); + assert_match(&r, &["1.2.0", "1.2.2", "1.2.4"]); + assert_not_match(&r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3"]); + } + + #[test] + pub fn test_any() { + let r = VersionReq::any(); + assert_match(&r, &["0.0.1", "0.1.0", "1.0.0"]); + } + + #[test] + pub fn test_pre() { + let r = req("=2.1.1-really.0"); + assert_match(&r, &["2.1.1-really.0"]); + } + + // #[test] + // pub fn test_parse_errors() { + // assert_eq!(Err(InvalidVersionRequirement), VersionReq::parse("\0")); + // assert_eq!(Err(OpAlreadySet), VersionReq::parse(">= >= 0.0.2")); + // assert_eq!(Err(InvalidSigil), VersionReq::parse(">== 0.0.2")); + // assert_eq!(Err(VersionComponentsMustBeNumeric), + // VersionReq::parse("a.0.0")); + // assert_eq!(Err(InvalidIdentifier), VersionReq::parse("1.0.0-")); + // assert_eq!(Err(MajorVersionRequired), VersionReq::parse(">=")); + // } + + #[test] + pub fn test_from_str() { + assert_eq!( + "1.0.0".parse::().unwrap().to_string(), + "^1.0.0".to_string() + ); + assert_eq!( + "=1.0.0".parse::().unwrap().to_string(), + "=1.0.0".to_string() + ); + assert_eq!( + "~1".parse::().unwrap().to_string(), + "~1".to_string() + ); + assert_eq!( + "~1.2".parse::().unwrap().to_string(), + "~1.2".to_string() + ); + assert_eq!( + "^1".parse::().unwrap().to_string(), + "^1".to_string() + ); + assert_eq!( + "^1.1".parse::().unwrap().to_string(), + "^1.1".to_string() + ); + assert_eq!( + "*".parse::().unwrap().to_string(), + "*".to_string() + ); + assert_eq!( + "1.*".parse::().unwrap().to_string(), + "1.*".to_string() + ); + assert_eq!( + "< 1.0.0".parse::().unwrap().to_string(), + "<1.0.0".to_string() + ); + } + + // #[test] + // pub fn test_from_str_errors() { + // assert_eq!(Err(InvalidVersionRequirement), "\0".parse::()); + // assert_eq!(Err(OpAlreadySet), ">= >= 0.0.2".parse::()); + // assert_eq!(Err(InvalidSigil), ">== 0.0.2".parse::()); + // assert_eq!(Err(VersionComponentsMustBeNumeric), + // "a.0.0".parse::()); + // assert_eq!(Err(InvalidIdentifier), "1.0.0-".parse::()); + // assert_eq!(Err(MajorVersionRequired), ">=".parse::()); + // } + + #[test] + fn test_cargo3202() { + let v = "0.*.*".parse::().unwrap(); + assert_eq!("0.*.*", format!("{}", v.predicates[0])); + + let v = "0.0.*".parse::().unwrap(); + assert_eq!("0.0.*", format!("{}", v.predicates[0])); + + let r = req("0.*.*"); + assert_match(&r, 
&["0.5.0"]); + } + + #[test] + fn test_eq_hash() { + assert!(req("^1") == req("^1")); + assert!(calculate_hash(req("^1")) == calculate_hash(req("^1"))); + assert!(req("^1") != req("^2")); + } + + #[test] + fn test_ordering() { + assert!(req("=1") < req("*")); + assert!(req(">1") < req("*")); + assert!(req(">=1") < req("*")); + assert!(req("<1") < req("*")); + assert!(req("<=1") < req("*")); + assert!(req("~1") < req("*")); + assert!(req("^1") < req("*")); + assert!(req("*") == req("*")); + } + + #[test] + fn is_exact() { + assert!(req("=1.0.0").is_exact()); + assert!(req("=1.0.0-alpha").is_exact()); + + assert!(!req("=1").is_exact()); + assert!(!req(">=1.0.0").is_exact()); + assert!(!req(">=1.0.0, <2.0.0").is_exact()); + } +} diff -Nru cargo-0.44.1/vendor/semver/src/version.rs cargo-0.47.0/vendor/semver/src/version.rs --- cargo-0.44.1/vendor/semver/src/version.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/src/version.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,759 +1,871 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The `version` module gives you tools to create and compare SemVer-compliant -//! versions. - -use std::cmp::{self, Ordering}; -use std::fmt; -use std::hash; -use std::error::Error; - -use std::result; -use std::str; - -use semver_parser; - -#[cfg(feature = "serde")] -use serde::ser::{Serialize, Serializer}; -#[cfg(feature = "serde")] -use serde::de::{self, Deserialize, Deserializer, Visitor}; - -/// An identifier in the pre-release or build metadata. -/// -/// See sections 9 and 10 of the spec for more about pre-release identifers and -/// build metadata. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub enum Identifier { - /// An identifier that's solely numbers. - Numeric(u64), - /// An identifier with letters and numbers. - AlphaNumeric(String), -} - -impl From for Identifier { - fn from(other: semver_parser::version::Identifier) -> Identifier { - match other { - semver_parser::version::Identifier::Numeric(n) => Identifier::Numeric(n), - semver_parser::version::Identifier::AlphaNumeric(s) => Identifier::AlphaNumeric(s), - } - } -} - -impl fmt::Display for Identifier { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Identifier::Numeric(ref n) => fmt::Display::fmt(n, f), - Identifier::AlphaNumeric(ref s) => fmt::Display::fmt(s, f), - } - } -} - -#[cfg(feature = "serde")] -impl Serialize for Identifier { - fn serialize(&self, serializer: S) -> result::Result - where S: Serializer - { - // Serialize Identifier as a number or string. - match *self { - Identifier::Numeric(n) => serializer.serialize_u64(n), - Identifier::AlphaNumeric(ref s) => serializer.serialize_str(s), - } - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Identifier { - fn deserialize(deserializer: D) -> result::Result - where D: Deserializer<'de> - { - struct IdentifierVisitor; - - // Deserialize Identifier from a number or string. 
- impl<'de> Visitor<'de> for IdentifierVisitor { - type Value = Identifier; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a SemVer pre-release or build identifier") - } - - fn visit_u64(self, numeric: u64) -> result::Result - where E: de::Error - { - Ok(Identifier::Numeric(numeric)) - } - - fn visit_str(self, alphanumeric: &str) -> result::Result - where E: de::Error - { - Ok(Identifier::AlphaNumeric(alphanumeric.to_owned())) - } - } - - deserializer.deserialize_any(IdentifierVisitor) - } -} - -/// Represents a version number conforming to the semantic versioning scheme. -#[derive(Clone, Eq, Debug)] -pub struct Version { - /// The major version, to be incremented on incompatible changes. - pub major: u64, - /// The minor version, to be incremented when functionality is added in a - /// backwards-compatible manner. - pub minor: u64, - /// The patch version, to be incremented when backwards-compatible bug - /// fixes are made. - pub patch: u64, - /// The pre-release version identifier, if one exists. - pub pre: Vec, - /// The build metadata, ignored when determining version precedence. - pub build: Vec, -} - -impl From for Version { - fn from(other: semver_parser::version::Version) -> Version { - Version { - major: other.major, - minor: other.minor, - patch: other.patch, - pre: other.pre.into_iter().map(From::from).collect(), - build: other.build.into_iter().map(From::from).collect(), - } - } -} - -#[cfg(feature = "serde")] -impl Serialize for Version { - fn serialize(&self, serializer: S) -> result::Result - where S: Serializer - { - // Serialize Version as a string. - serializer.collect_str(self) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Version { - fn deserialize(deserializer: D) -> result::Result - where D: Deserializer<'de> - { - struct VersionVisitor; - - // Deserialize Version from a string. - impl<'de> Visitor<'de> for VersionVisitor { - type Value = Version; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a SemVer version as a string") - } - - fn visit_str(self, v: &str) -> result::Result - where E: de::Error - { - Version::parse(v).map_err(de::Error::custom) - } - } - - deserializer.deserialize_str(VersionVisitor) - } -} - -/// An error type for this crate -/// -/// Currently, just a generic error. Will make this nicer later. -#[derive(Clone,PartialEq,Debug,PartialOrd)] -pub enum SemVerError { - /// An error ocurred while parsing. - ParseError(String), -} - -impl fmt::Display for SemVerError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - &SemVerError::ParseError(ref m) => write!(f, "{}", m), - } - } -} - -impl Error for SemVerError { - fn description(&self) -> &str { - match self { - &SemVerError::ParseError(ref m) => m, - } - } -} - -/// A Result type for errors -pub type Result = result::Result; - -impl Version { - - /// Contructs the simple case without pre or build. - pub fn new(major: u64, minor: u64, patch: u64) -> Version { - Version { - major: major, - minor: minor, - patch: patch, - pre: Vec::new(), - build: Vec::new() - } - } - - /// Parse a string into a semver object. 
- pub fn parse(version: &str) -> Result { - let res = semver_parser::version::parse(version); - - match res { - // Convert plain String error into proper ParseError - Err(e) => Err(SemVerError::ParseError(e)), - Ok(v) => Ok(From::from(v)), - } - } - - /// Clears the build metadata - fn clear_metadata(&mut self) { - self.build = Vec::new(); - self.pre = Vec::new(); - } - - /// Increments the patch number for this Version (Must be mutable) - pub fn increment_patch(&mut self) { - self.patch += 1; - self.clear_metadata(); - } - - /// Increments the minor version number for this Version (Must be mutable) - /// - /// As instructed by section 7 of the spec, the patch number is reset to 0. - pub fn increment_minor(&mut self) { - self.minor += 1; - self.patch = 0; - self.clear_metadata(); - } - - /// Increments the major version number for this Version (Must be mutable) - /// - /// As instructed by section 8 of the spec, the minor and patch numbers are - /// reset to 0 - pub fn increment_major(&mut self) { - self.major += 1; - self.minor = 0; - self.patch = 0; - self.clear_metadata(); - } - - /// Checks to see if the current Version is in pre-release status - pub fn is_prerelease(&self) -> bool { - !self.pre.is_empty() - } -} - -impl str::FromStr for Version { - type Err = SemVerError; - - fn from_str(s: &str) -> Result { - Version::parse(s) - } -} - -impl fmt::Display for Version { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "{}.{}.{}", self.major, self.minor, self.patch)); - if !self.pre.is_empty() { - try!(write!(f, "-")); - for (i, x) in self.pre.iter().enumerate() { - if i != 0 { - try!(write!(f, ".")) - } - try!(write!(f, "{}", x)); - } - } - if !self.build.is_empty() { - try!(write!(f, "+")); - for (i, x) in self.build.iter().enumerate() { - if i != 0 { - try!(write!(f, ".")) - } - try!(write!(f, "{}", x)); - } - } - Ok(()) - } -} - -impl cmp::PartialEq for Version { - #[inline] - fn eq(&self, other: &Version) -> bool { - // We should ignore build metadata here, otherwise versions v1 and v2 - // can exist such that !(v1 < v2) && !(v1 > v2) && v1 != v2, which - // violate strict total ordering rules. 
- self.major == other.major && self.minor == other.minor && self.patch == other.patch && - self.pre == other.pre - } -} - -impl cmp::PartialOrd for Version { - fn partial_cmp(&self, other: &Version) -> Option { - Some(self.cmp(other)) - } -} - -impl cmp::Ord for Version { - fn cmp(&self, other: &Version) -> Ordering { - match self.major.cmp(&other.major) { - Ordering::Equal => {} - r => return r, - } - - match self.minor.cmp(&other.minor) { - Ordering::Equal => {} - r => return r, - } - - match self.patch.cmp(&other.patch) { - Ordering::Equal => {} - r => return r, - } - - // NB: semver spec says 0.0.0-pre < 0.0.0 - // but the version of ord defined for vec - // says that [] < [pre] so we alter it here - match (self.pre.len(), other.pre.len()) { - (0, 0) => Ordering::Equal, - (0, _) => Ordering::Greater, - (_, 0) => Ordering::Less, - (_, _) => self.pre.cmp(&other.pre), - } - } -} - -impl hash::Hash for Version { - fn hash(&self, into: &mut H) { - self.major.hash(into); - self.minor.hash(into); - self.patch.hash(into); - self.pre.hash(into); - } -} - -impl From<(u64,u64,u64)> for Version { - fn from(tuple: (u64,u64,u64)) -> Version { - let (major, minor, patch) = tuple; - Version::new(major, minor, patch) - } -} - -#[cfg(test)] -mod tests { - use std::result; - use super::Version; - use super::Identifier; - use super::SemVerError; - - #[test] - fn test_parse() { - fn parse_error(e: &str) -> result::Result { - return Err(SemVerError::ParseError(e.to_string())); - } - - assert_eq!(Version::parse(""), - parse_error("Error parsing major identifier")); - assert_eq!(Version::parse(" "), - parse_error("Error parsing major identifier")); - assert_eq!(Version::parse("1"), - parse_error("Expected dot")); - assert_eq!(Version::parse("1.2"), - parse_error("Expected dot")); - assert_eq!(Version::parse("1.2.3-"), - parse_error("Error parsing prerelease")); - assert_eq!(Version::parse("a.b.c"), - parse_error("Error parsing major identifier")); - assert_eq!(Version::parse("1.2.3 abc"), - parse_error("Extra junk after valid version: abc")); - - assert_eq!(Version::parse("1.2.3"), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: Vec::new(), - })); - - assert_eq!(Version::parse("1.2.3"), - Ok(Version::new(1,2,3))); - - assert_eq!(Version::parse(" 1.2.3 "), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: Vec::new(), - })); - assert_eq!(Version::parse("1.2.3-alpha1"), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: Vec::new(), - })); - assert_eq!(Version::parse(" 1.2.3-alpha1 "), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: Vec::new(), - })); - assert_eq!(Version::parse("1.2.3+build5"), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!(Version::parse(" 1.2.3+build5 "), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!(Version::parse("1.2.3-alpha1+build5"), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!(Version::parse(" 1.2.3-alpha1+build5 "), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: 
vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!(Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf "), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::Numeric(1), - Identifier::AlphaNumeric(String::from("alpha1")), - Identifier::Numeric(9), - ], - build: vec![Identifier::AlphaNumeric(String::from("build5")), - Identifier::Numeric(7), - Identifier::AlphaNumeric(String::from("3aedf")), - ], - })); - assert_eq!(Version::parse("0.4.0-beta.1+0851523"), - Ok(Version { - major: 0, - minor: 4, - patch: 0, - pre: vec![Identifier::AlphaNumeric(String::from("beta")), - Identifier::Numeric(1), - ], - build: vec![Identifier::AlphaNumeric(String::from("0851523"))], - })); - - } - - #[test] - fn test_increment_patch() { - let mut buggy_release = Version::parse("0.1.0").unwrap(); - buggy_release.increment_patch(); - assert_eq!(buggy_release, Version::parse("0.1.1").unwrap()); - } - - #[test] - fn test_increment_minor() { - let mut feature_release = Version::parse("1.4.6").unwrap(); - feature_release.increment_minor(); - assert_eq!(feature_release, Version::parse("1.5.0").unwrap()); - } - - #[test] - fn test_increment_major() { - let mut chrome_release = Version::parse("46.1.246773").unwrap(); - chrome_release.increment_major(); - assert_eq!(chrome_release, Version::parse("47.0.0").unwrap()); - } - - #[test] - fn test_increment_keep_prerelease() { - let mut release = Version::parse("1.0.0-alpha").unwrap(); - release.increment_patch(); - - assert_eq!(release, Version::parse("1.0.1").unwrap()); - - release.increment_minor(); - - assert_eq!(release, Version::parse("1.1.0").unwrap()); - - release.increment_major(); - - assert_eq!(release, Version::parse("2.0.0").unwrap()); - } - - - #[test] - fn test_increment_clear_metadata() { - let mut release = Version::parse("1.0.0+4442").unwrap(); - release.increment_patch(); - - assert_eq!(release, Version::parse("1.0.1").unwrap()); - release = Version::parse("1.0.1+hello").unwrap(); - - release.increment_minor(); - - assert_eq!(release, Version::parse("1.1.0").unwrap()); - release = Version::parse("1.1.3747+hello").unwrap(); - - release.increment_major(); - - assert_eq!(release, Version::parse("2.0.0").unwrap()); - } - - #[test] - fn test_eq() { - assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3")); - assert_eq!(Version::parse("1.2.3-alpha1"), - Version::parse("1.2.3-alpha1")); - assert_eq!(Version::parse("1.2.3+build.42"), - Version::parse("1.2.3+build.42")); - assert_eq!(Version::parse("1.2.3-alpha1+42"), - Version::parse("1.2.3-alpha1+42")); - assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42")); - } - - #[test] - fn test_ne() { - assert!(Version::parse("0.0.0") != Version::parse("0.0.1")); - assert!(Version::parse("0.0.0") != Version::parse("0.1.0")); - assert!(Version::parse("0.0.0") != Version::parse("1.0.0")); - assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); - } - - #[test] - fn test_show() { - assert_eq!(format!("{}", Version::parse("1.2.3").unwrap()), - "1.2.3".to_string()); - assert_eq!(format!("{}", Version::parse("1.2.3-alpha1").unwrap()), - "1.2.3-alpha1".to_string()); - assert_eq!(format!("{}", Version::parse("1.2.3+build.42").unwrap()), - "1.2.3+build.42".to_string()); - assert_eq!(format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()), - "1.2.3-alpha1+42".to_string()); - } - - #[test] - fn test_to_string() { - assert_eq!(Version::parse("1.2.3").unwrap().to_string(), - 
"1.2.3".to_string()); - assert_eq!(Version::parse("1.2.3-alpha1").unwrap().to_string(), - "1.2.3-alpha1".to_string()); - assert_eq!(Version::parse("1.2.3+build.42").unwrap().to_string(), - "1.2.3+build.42".to_string()); - assert_eq!(Version::parse("1.2.3-alpha1+42").unwrap().to_string(), - "1.2.3-alpha1+42".to_string()); - } - - #[test] - fn test_lt() { - assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3")); - assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3-alpha2")); - assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2"))); - assert!(!(Version::parse("1.2.3+23") < Version::parse("1.2.3+42"))); - } - - #[test] - fn test_le() { - assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42")); - } - - #[test] - fn test_gt() { - assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0")); - assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0")); - assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); - assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha1")); - assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2")); - assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2"))); - assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42"))); - } - - #[test] - fn test_ge() { - assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0")); - assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0")); - assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0")); - assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1")); - assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2")); - assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42")); - } - - #[test] - fn test_prerelease_check() { - assert!(Version::parse("1.0.0").unwrap().is_prerelease() == false); - assert!(Version::parse("0.0.1").unwrap().is_prerelease() == false); - assert!(Version::parse("4.1.4-alpha").unwrap().is_prerelease()); - assert!(Version::parse("1.0.0-beta294296").unwrap().is_prerelease()); - } - - #[test] - fn test_spec_order() { - let vs = ["1.0.0-alpha", - "1.0.0-alpha.1", - "1.0.0-alpha.beta", - "1.0.0-beta", - "1.0.0-beta.2", - "1.0.0-beta.11", - "1.0.0-rc.1", - "1.0.0"]; - let mut i = 1; - while i < vs.len() { - let a = Version::parse(vs[i - 1]); - let b = Version::parse(vs[i]); - assert!(a < b, "nope {:?} < {:?}", a, b); - i += 1; - } - } - - #[test] - fn test_from_str() { - assert_eq!("1.2.3".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: Vec::new(), - })); - assert_eq!(" 1.2.3 ".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: Vec::new(), - })); - assert_eq!("1.2.3-alpha1".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: Vec::new(), - })); - assert_eq!(" 1.2.3-alpha1 ".parse(), - Ok(Version { - major: 1, - 
minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: Vec::new(), - })); - assert_eq!("1.2.3+build5".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!(" 1.2.3+build5 ".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: Vec::new(), - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!("1.2.3-alpha1+build5".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!(" 1.2.3-alpha1+build5 ".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], - build: vec![Identifier::AlphaNumeric(String::from("build5"))], - })); - assert_eq!("1.2.3-1.alpha1.9+build5.7.3aedf ".parse(), - Ok(Version { - major: 1, - minor: 2, - patch: 3, - pre: vec![Identifier::Numeric(1), - Identifier::AlphaNumeric(String::from("alpha1")), - Identifier::Numeric(9), - ], - build: vec![Identifier::AlphaNumeric(String::from("build5")), - Identifier::Numeric(7), - Identifier::AlphaNumeric(String::from("3aedf")), - ], - })); - assert_eq!("0.4.0-beta.1+0851523".parse(), - Ok(Version { - major: 0, - minor: 4, - patch: 0, - pre: vec![Identifier::AlphaNumeric(String::from("beta")), - Identifier::Numeric(1), - ], - build: vec![Identifier::AlphaNumeric(String::from("0851523"))], - })); - - } - - #[test] - fn test_from_str_errors() { - fn parse_error(e: &str) -> result::Result { - return Err(SemVerError::ParseError(e.to_string())); - } - - assert_eq!("".parse(), parse_error("Error parsing major identifier")); - assert_eq!(" ".parse(), parse_error("Error parsing major identifier")); - assert_eq!("1".parse(), parse_error("Expected dot")); - assert_eq!("1.2".parse(), - parse_error("Expected dot")); - assert_eq!("1.2.3-".parse(), - parse_error("Error parsing prerelease")); - assert_eq!("a.b.c".parse(), - parse_error("Error parsing major identifier")); - assert_eq!("1.2.3 abc".parse(), - parse_error("Extra junk after valid version: abc")); - } -} +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The `version` module gives you tools to create and compare SemVer-compliant +//! versions. + +use std::cmp::{self, Ordering}; +use std::error::Error; +use std::fmt; +use std::hash; + +use std::result; +use std::str; + +use semver_parser; + +#[cfg(feature = "serde")] +use serde::de::{self, Deserialize, Deserializer, Visitor}; +#[cfg(feature = "serde")] +use serde::ser::{Serialize, Serializer}; + +/// An identifier in the pre-release or build metadata. +/// +/// See sections 9 and 10 of the spec for more about pre-release identifers and +/// build metadata. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum Identifier { + /// An identifier that's solely numbers. + Numeric(u64), + /// An identifier with letters and numbers. 
+ AlphaNumeric(String), +} + +impl From for Identifier { + fn from(other: semver_parser::version::Identifier) -> Identifier { + match other { + semver_parser::version::Identifier::Numeric(n) => Identifier::Numeric(n), + semver_parser::version::Identifier::AlphaNumeric(s) => Identifier::AlphaNumeric(s), + } + } +} + +impl fmt::Display for Identifier { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Identifier::Numeric(ref n) => fmt::Display::fmt(n, f), + Identifier::AlphaNumeric(ref s) => fmt::Display::fmt(s, f), + } + } +} + +#[cfg(feature = "serde")] +impl Serialize for Identifier { + fn serialize(&self, serializer: S) -> result::Result + where + S: Serializer, + { + // Serialize Identifier as a number or string. + match *self { + Identifier::Numeric(n) => serializer.serialize_u64(n), + Identifier::AlphaNumeric(ref s) => serializer.serialize_str(s), + } + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for Identifier { + fn deserialize(deserializer: D) -> result::Result + where + D: Deserializer<'de>, + { + struct IdentifierVisitor; + + // Deserialize Identifier from a number or string. + impl<'de> Visitor<'de> for IdentifierVisitor { + type Value = Identifier; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a SemVer pre-release or build identifier") + } + + fn visit_u64(self, numeric: u64) -> result::Result + where + E: de::Error, + { + Ok(Identifier::Numeric(numeric)) + } + + fn visit_str(self, alphanumeric: &str) -> result::Result + where + E: de::Error, + { + Ok(Identifier::AlphaNumeric(alphanumeric.to_owned())) + } + } + + deserializer.deserialize_any(IdentifierVisitor) + } +} + +/// Represents a version number conforming to the semantic versioning scheme. +#[derive(Clone, Eq, Debug)] +#[cfg_attr(feature = "diesel", derive(AsExpression, FromSqlRow))] +#[cfg_attr(feature = "diesel", sql_type = "diesel::sql_types::Text")] +pub struct Version { + /// The major version, to be incremented on incompatible changes. + pub major: u64, + /// The minor version, to be incremented when functionality is added in a + /// backwards-compatible manner. + pub minor: u64, + /// The patch version, to be incremented when backwards-compatible bug + /// fixes are made. + pub patch: u64, + /// The pre-release version identifier, if one exists. + pub pre: Vec, + /// The build metadata, ignored when determining version precedence. + pub build: Vec, +} + +impl From for Version { + fn from(other: semver_parser::version::Version) -> Version { + Version { + major: other.major, + minor: other.minor, + patch: other.patch, + pre: other.pre.into_iter().map(From::from).collect(), + build: other.build.into_iter().map(From::from).collect(), + } + } +} + +#[cfg(feature = "serde")] +impl Serialize for Version { + fn serialize(&self, serializer: S) -> result::Result + where + S: Serializer, + { + // Serialize Version as a string. + serializer.collect_str(self) + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for Version { + fn deserialize(deserializer: D) -> result::Result + where + D: Deserializer<'de>, + { + struct VersionVisitor; + + // Deserialize Version from a string. 
+ impl<'de> Visitor<'de> for VersionVisitor { + type Value = Version; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a SemVer version as a string") + } + + fn visit_str(self, v: &str) -> result::Result + where + E: de::Error, + { + Version::parse(v).map_err(de::Error::custom) + } + } + + deserializer.deserialize_str(VersionVisitor) + } +} + +/// An error type for this crate +/// +/// Currently, just a generic error. Will make this nicer later. +#[derive(Clone, PartialEq, Debug, PartialOrd)] +pub enum SemVerError { + /// An error ocurred while parsing. + ParseError(String), +} + +impl fmt::Display for SemVerError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SemVerError::ParseError(ref m) => write!(f, "{}", m), + } + } +} + +impl Error for SemVerError {} + +/// A Result type for errors +pub type Result = result::Result; + +impl Version { + /// Contructs the simple case without pre or build. + pub fn new(major: u64, minor: u64, patch: u64) -> Version { + Version { + major, + minor, + patch, + pre: Vec::new(), + build: Vec::new(), + } + } + + /// Parse a string into a semver object. + /// + /// # Errors + /// + /// Returns an error variant if the input could not be parsed as a semver object. + /// + /// In general, this means that the provided string does not conform to the + /// [semver spec][semver]. + /// + /// An error for overflow is returned if any numeric component is larger than what can be + /// stored in `u64`. + /// + /// The following are examples for other common error causes: + /// + /// * `1.0` - too few numeric components are used. Exactly 3 are expected. + /// * `1.0.01` - a numeric component has a leading zero. + /// * `1.0.foo` - uses a non-numeric components where one is expected. + /// * `1.0.0foo` - metadata is not separated using a legal character like, `+` or `-`. + /// * `1.0.0+foo_123` - contains metadata with an illegal character (`_`). + /// Legal characters for metadata include `a-z`, `A-Z`, `0-9`, `-`, and `.` (dot). + /// + /// [semver]: https://semver.org + pub fn parse(version: &str) -> Result { + let res = semver_parser::version::parse(version); + + match res { + // Convert plain String error into proper ParseError + Err(e) => Err(SemVerError::ParseError(e)), + Ok(v) => Ok(From::from(v)), + } + } + + /// Clears the build metadata + fn clear_metadata(&mut self) { + self.build = Vec::new(); + self.pre = Vec::new(); + } + + /// Increments the patch number for this Version (Must be mutable) + pub fn increment_patch(&mut self) { + self.patch += 1; + self.clear_metadata(); + } + + /// Increments the minor version number for this Version (Must be mutable) + /// + /// As instructed by section 7 of the spec, the patch number is reset to 0. 
+ pub fn increment_minor(&mut self) { + self.minor += 1; + self.patch = 0; + self.clear_metadata(); + } + + /// Increments the major version number for this Version (Must be mutable) + /// + /// As instructed by section 8 of the spec, the minor and patch numbers are + /// reset to 0 + pub fn increment_major(&mut self) { + self.major += 1; + self.minor = 0; + self.patch = 0; + self.clear_metadata(); + } + + /// Checks to see if the current Version is in pre-release status + pub fn is_prerelease(&self) -> bool { + !self.pre.is_empty() + } +} + +impl str::FromStr for Version { + type Err = SemVerError; + + fn from_str(s: &str) -> Result { + Version::parse(s) + } +} + +impl fmt::Display for Version { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut result = format!("{}.{}.{}", self.major, self.minor, self.patch); + + if !self.pre.is_empty() { + result.push_str("-"); + for (i, x) in self.pre.iter().enumerate() { + if i != 0 { + result.push_str("."); + } + result.push_str(format!("{}", x).as_ref()); + } + } + if !self.build.is_empty() { + result.push_str("+"); + for (i, x) in self.build.iter().enumerate() { + if i != 0 { + result.push_str("."); + } + result.push_str(format!("{}", x).as_ref()); + } + } + + f.pad(result.as_ref())?; + Ok(()) + } +} + +impl cmp::PartialEq for Version { + #[inline] + fn eq(&self, other: &Version) -> bool { + // We should ignore build metadata here, otherwise versions v1 and v2 + // can exist such that !(v1 < v2) && !(v1 > v2) && v1 != v2, which + // violate strict total ordering rules. + self.major == other.major + && self.minor == other.minor + && self.patch == other.patch + && self.pre == other.pre + } +} + +impl cmp::PartialOrd for Version { + fn partial_cmp(&self, other: &Version) -> Option { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for Version { + fn cmp(&self, other: &Version) -> Ordering { + match self.major.cmp(&other.major) { + Ordering::Equal => {} + r => return r, + } + + match self.minor.cmp(&other.minor) { + Ordering::Equal => {} + r => return r, + } + + match self.patch.cmp(&other.patch) { + Ordering::Equal => {} + r => return r, + } + + // NB: semver spec says 0.0.0-pre < 0.0.0 + // but the version of ord defined for vec + // says that [] < [pre] so we alter it here + match (self.pre.len(), other.pre.len()) { + (0, 0) => Ordering::Equal, + (0, _) => Ordering::Greater, + (_, 0) => Ordering::Less, + (_, _) => self.pre.cmp(&other.pre), + } + } +} + +impl hash::Hash for Version { + fn hash(&self, into: &mut H) { + self.major.hash(into); + self.minor.hash(into); + self.patch.hash(into); + self.pre.hash(into); + } +} + +impl From<(u64, u64, u64)> for Version { + fn from(tuple: (u64, u64, u64)) -> Version { + let (major, minor, patch) = tuple; + Version::new(major, minor, patch) + } +} + +#[cfg(test)] +mod tests { + use super::Identifier; + use super::SemVerError; + use super::Version; + use std::result; + + #[test] + fn test_parse() { + fn parse_error(e: &str) -> result::Result { + return Err(SemVerError::ParseError(e.to_string())); + } + + assert_eq!( + Version::parse(""), + parse_error("Error parsing major identifier") + ); + assert_eq!( + Version::parse(" "), + parse_error("Error parsing major identifier") + ); + assert_eq!(Version::parse("1"), parse_error("Expected dot")); + assert_eq!(Version::parse("1.2"), parse_error("Expected dot")); + assert_eq!( + Version::parse("1.2.3-"), + parse_error("Error parsing prerelease") + ); + assert_eq!( + Version::parse("a.b.c"), + parse_error("Error parsing major identifier") + ); 
+ assert_eq!( + Version::parse("1.2.3 abc"), + parse_error("Extra junk after valid version: abc") + ); + + assert_eq!( + Version::parse("1.2.3"), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: Vec::new(), + }) + ); + + assert_eq!(Version::parse("1.2.3"), Ok(Version::new(1, 2, 3))); + + assert_eq!( + Version::parse(" 1.2.3 "), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: Vec::new(), + }) + ); + assert_eq!( + Version::parse("1.2.3-alpha1"), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: Vec::new(), + }) + ); + assert_eq!( + Version::parse(" 1.2.3-alpha1 "), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: Vec::new(), + }) + ); + assert_eq!( + Version::parse("1.2.3+build5"), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + Version::parse(" 1.2.3+build5 "), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + Version::parse("1.2.3-alpha1+build5"), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + Version::parse(" 1.2.3-alpha1+build5 "), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf "), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![ + Identifier::Numeric(1), + Identifier::AlphaNumeric(String::from("alpha1")), + Identifier::Numeric(9), + ], + build: vec![ + Identifier::AlphaNumeric(String::from("build5")), + Identifier::Numeric(7), + Identifier::AlphaNumeric(String::from("3aedf")), + ], + }) + ); + assert_eq!( + Version::parse("0.4.0-beta.1+0851523"), + Ok(Version { + major: 0, + minor: 4, + patch: 0, + pre: vec![ + Identifier::AlphaNumeric(String::from("beta")), + Identifier::Numeric(1), + ], + build: vec![Identifier::AlphaNumeric(String::from("0851523"))], + }) + ); + } + + #[test] + fn test_increment_patch() { + let mut buggy_release = Version::parse("0.1.0").unwrap(); + buggy_release.increment_patch(); + assert_eq!(buggy_release, Version::parse("0.1.1").unwrap()); + } + + #[test] + fn test_increment_minor() { + let mut feature_release = Version::parse("1.4.6").unwrap(); + feature_release.increment_minor(); + assert_eq!(feature_release, Version::parse("1.5.0").unwrap()); + } + + #[test] + fn test_increment_major() { + let mut chrome_release = Version::parse("46.1.246773").unwrap(); + chrome_release.increment_major(); + assert_eq!(chrome_release, Version::parse("47.0.0").unwrap()); + } + + #[test] + fn test_increment_keep_prerelease() { + let mut release = Version::parse("1.0.0-alpha").unwrap(); + release.increment_patch(); + + assert_eq!(release, Version::parse("1.0.1").unwrap()); + + release.increment_minor(); + + assert_eq!(release, Version::parse("1.1.0").unwrap()); + + release.increment_major(); + + assert_eq!(release, Version::parse("2.0.0").unwrap()); + } + + #[test] + fn test_increment_clear_metadata() { + let mut release = Version::parse("1.0.0+4442").unwrap(); + 
release.increment_patch(); + + assert_eq!(release, Version::parse("1.0.1").unwrap()); + release = Version::parse("1.0.1+hello").unwrap(); + + release.increment_minor(); + + assert_eq!(release, Version::parse("1.1.0").unwrap()); + release = Version::parse("1.1.3747+hello").unwrap(); + + release.increment_major(); + + assert_eq!(release, Version::parse("2.0.0").unwrap()); + } + + #[test] + fn test_eq() { + assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3")); + assert_eq!( + Version::parse("1.2.3-alpha1"), + Version::parse("1.2.3-alpha1") + ); + assert_eq!( + Version::parse("1.2.3+build.42"), + Version::parse("1.2.3+build.42") + ); + assert_eq!( + Version::parse("1.2.3-alpha1+42"), + Version::parse("1.2.3-alpha1+42") + ); + assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42")); + } + + #[test] + fn test_ne() { + assert!(Version::parse("0.0.0") != Version::parse("0.0.1")); + assert!(Version::parse("0.0.0") != Version::parse("0.1.0")); + assert!(Version::parse("0.0.0") != Version::parse("1.0.0")); + assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); + } + + #[test] + fn test_show() { + assert_eq!( + format!("{}", Version::parse("1.2.3").unwrap()), + "1.2.3".to_string() + ); + assert_eq!( + format!("{}", Version::parse("1.2.3-alpha1").unwrap()), + "1.2.3-alpha1".to_string() + ); + assert_eq!( + format!("{}", Version::parse("1.2.3+build.42").unwrap()), + "1.2.3+build.42".to_string() + ); + assert_eq!( + format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()), + "1.2.3-alpha1+42".to_string() + ); + } + + #[test] + fn test_display() { + let version = Version::parse("1.2.3-rc1").unwrap(); + assert_eq!(format!("{:20}", version), "1.2.3-rc1 "); + assert_eq!(format!("{:*^20}", version), "*****1.2.3-rc1******"); + assert_eq!(format!("{:.4}", version), "1.2."); + } + + #[test] + fn test_to_string() { + assert_eq!( + Version::parse("1.2.3").unwrap().to_string(), + "1.2.3".to_string() + ); + assert_eq!( + Version::parse("1.2.3-alpha1").unwrap().to_string(), + "1.2.3-alpha1".to_string() + ); + assert_eq!( + Version::parse("1.2.3+build.42").unwrap().to_string(), + "1.2.3+build.42".to_string() + ); + assert_eq!( + Version::parse("1.2.3-alpha1+42").unwrap().to_string(), + "1.2.3-alpha1+42".to_string() + ); + } + + #[test] + fn test_lt() { + assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3")); + assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3-alpha2")); + assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2"))); + assert!(!(Version::parse("1.2.3+23") < Version::parse("1.2.3+42"))); + } + + #[test] + fn test_le() { + assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42")); + } + + #[test] + fn test_gt() { + assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0")); + assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0")); + assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); + assert!(Version::parse("1.2.3-alpha2") > 
Version::parse("1.2.3-alpha1")); + assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2")); + assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2"))); + assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42"))); + } + + #[test] + fn test_ge() { + assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0")); + assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0")); + assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0")); + assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1")); + assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2")); + assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42")); + } + + #[test] + fn test_prerelease_check() { + assert!(Version::parse("1.0.0").unwrap().is_prerelease() == false); + assert!(Version::parse("0.0.1").unwrap().is_prerelease() == false); + assert!(Version::parse("4.1.4-alpha").unwrap().is_prerelease()); + assert!(Version::parse("1.0.0-beta294296").unwrap().is_prerelease()); + } + + #[test] + fn test_spec_order() { + let vs = [ + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0-alpha.beta", + "1.0.0-beta", + "1.0.0-beta.2", + "1.0.0-beta.11", + "1.0.0-rc.1", + "1.0.0", + ]; + let mut i = 1; + while i < vs.len() { + let a = Version::parse(vs[i - 1]); + let b = Version::parse(vs[i]); + assert!(a < b, "nope {:?} < {:?}", a, b); + i += 1; + } + } + + #[test] + fn test_from_str() { + assert_eq!( + "1.2.3".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: Vec::new(), + }) + ); + assert_eq!( + " 1.2.3 ".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: Vec::new(), + }) + ); + assert_eq!( + "1.2.3-alpha1".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: Vec::new(), + }) + ); + assert_eq!( + " 1.2.3-alpha1 ".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: Vec::new(), + }) + ); + assert_eq!( + "1.2.3+build5".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + " 1.2.3+build5 ".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: Vec::new(), + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + "1.2.3-alpha1+build5".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + " 1.2.3-alpha1+build5 ".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], + build: vec![Identifier::AlphaNumeric(String::from("build5"))], + }) + ); + assert_eq!( + "1.2.3-1.alpha1.9+build5.7.3aedf ".parse(), + Ok(Version { + major: 1, + minor: 2, + patch: 3, + pre: vec![ + Identifier::Numeric(1), + Identifier::AlphaNumeric(String::from("alpha1")), + Identifier::Numeric(9), + ], + build: vec![ + Identifier::AlphaNumeric(String::from("build5")), + Identifier::Numeric(7), + Identifier::AlphaNumeric(String::from("3aedf")), + ], + }) + ); + assert_eq!( + "0.4.0-beta.1+0851523".parse(), + Ok(Version { + major: 0, + minor: 4, + patch: 0, + pre: vec![ + Identifier::AlphaNumeric(String::from("beta")), + Identifier::Numeric(1), + ], + build: 
vec![Identifier::AlphaNumeric(String::from("0851523"))], + }) + ); + } + + #[test] + fn test_from_str_errors() { + fn parse_error(e: &str) -> result::Result { + return Err(SemVerError::ParseError(e.to_string())); + } + + assert_eq!("".parse(), parse_error("Error parsing major identifier")); + assert_eq!(" ".parse(), parse_error("Error parsing major identifier")); + assert_eq!("1".parse(), parse_error("Expected dot")); + assert_eq!("1.2".parse(), parse_error("Expected dot")); + assert_eq!("1.2.3-".parse(), parse_error("Error parsing prerelease")); + assert_eq!( + "a.b.c".parse(), + parse_error("Error parsing major identifier") + ); + assert_eq!( + "1.2.3 abc".parse(), + parse_error("Extra junk after valid version: abc") + ); + } +} diff -Nru cargo-0.44.1/vendor/semver/tests/deprecation.rs cargo-0.47.0/vendor/semver/tests/deprecation.rs --- cargo-0.44.1/vendor/semver/tests/deprecation.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/tests/deprecation.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,22 +1,25 @@ -extern crate semver; - -#[test] -fn test_regressions() { - use semver::VersionReq; - use semver::ReqParseError; - - let versions = vec![ - (".*", VersionReq::any()), - ("0.1.0.", VersionReq::parse("0.1.0").unwrap()), - ("0.3.1.3", VersionReq::parse("0.3.13").unwrap()), - ("0.2*", VersionReq::parse("0.2.*").unwrap()), - ("*.0", VersionReq::any()), - ]; - - for (version, requirement) in versions.into_iter() { - let parsed = VersionReq::parse(version); - let error = parsed.err().unwrap(); - - assert_eq!(ReqParseError::DeprecatedVersionRequirement(requirement), error); - } -} +extern crate semver; + +#[test] +fn test_regressions() { + use semver::ReqParseError; + use semver::VersionReq; + + let versions = vec![ + (".*", VersionReq::any()), + ("0.1.0.", VersionReq::parse("0.1.0").unwrap()), + ("0.3.1.3", VersionReq::parse("0.3.13").unwrap()), + ("0.2*", VersionReq::parse("0.2.*").unwrap()), + ("*.0", VersionReq::any()), + ]; + + for (version, requirement) in versions.into_iter() { + let parsed = VersionReq::parse(version); + let error = parsed.err().unwrap(); + + assert_eq!( + ReqParseError::DeprecatedVersionRequirement(requirement), + error + ); + } +} diff -Nru cargo-0.44.1/vendor/semver/tests/diesel.rs cargo-0.47.0/vendor/semver/tests/diesel.rs --- cargo-0.44.1/vendor/semver/tests/diesel.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/semver/tests/diesel.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,209 @@ +#![cfg(feature = "diesel")] + +#[macro_use] +extern crate diesel; +extern crate semver; + +use diesel::connection::SimpleConnection; +use diesel::sql_types::Text; +use diesel::*; +use semver::{Version, VersionReq}; + +table! { + versions (name) { + name -> Text, + vers -> Text, + } +} + +table! 
{ + version_reqs (name) { + name -> Text, + req -> Text, + } +} + +fn connection() -> SqliteConnection { + let conn = SqliteConnection::establish(":memory:").unwrap(); + conn.batch_execute( + " + CREATE TABLE versions (name TEXT PRIMARY KEY NOT NULL, vers TEXT NOT NULL); + CREATE TABLE version_reqs (name TEXT PRIMARY KEY NOT NULL, req TEXT NOT NULL); + ", + ) + .unwrap(); + conn +} + +const VERSIONS_TO_TEST: &[&str] = &[ + "0.0.1", + "0.1.0", + "1.0.0", + "1.0.0-beta1", + "1.0.0-beta.1", + "1.0.0+129384712983", + "1.0.0-beta.1+1234.5678", +]; + +#[test] +fn version_round_trips() { + let conn = connection(); + for version in VERSIONS_TO_TEST { + let version = version.parse::().unwrap(); + let result = select(version.as_sql::()).get_result(&conn); + assert_eq!(Ok(version), result); + } +} + +#[test] +fn version_inserts_and_loads() { + use self::versions::dsl::*; + + let conn = connection(); + let semver_versions = VERSIONS_TO_TEST + .iter() + .enumerate() + .map(|(i, v)| (format!("Version {}", i), v.parse::().unwrap())) + .collect::>(); + + let new_versions = semver_versions + .iter() + .map(|&(ref n, ref v)| (name.eq(n), vers.eq(v))) + .collect::>(); + let inserted_rows = insert_into(versions).values(&new_versions).execute(&conn); + assert_eq!(Ok(VERSIONS_TO_TEST.len()), inserted_rows); + + let actual_data = versions.load(&conn); + assert_eq!(Ok(semver_versions.clone()), actual_data); +} + +#[test] +fn version_inserts_and_loads_on_struct() { + #[derive(Debug, PartialEq, Queryable, Insertable)] + #[table_name = "versions"] + struct Versioned { + name: String, + vers: Version, + } + + let conn = connection(); + let semver_versions = VERSIONS_TO_TEST + .iter() + .enumerate() + .map(|(i, v)| Versioned { + name: format!("Version {}", i), + vers: v.parse::().unwrap(), + }) + .collect::>(); + + let inserted_rows = insert_into(versions::table) + .values(&semver_versions) + .execute(&conn); + assert_eq!(Ok(VERSIONS_TO_TEST.len()), inserted_rows); + + let actual_data = versions::table.load(&conn); + assert_eq!(Ok(semver_versions), actual_data); +} + +const VERSION_REQS_TO_TEST: &[&str] = &[ + "^1.0.0", + "= 1.0.0", + "= 0.9.0", + "= 0.1.0-beta2.a", + ">= 1.0.0", + ">= 2.1.0-alpha2", + "< 1.0.0", + "<= 2.1.0-alpha2", + "^ 1.2.3+meta", + "= 1.2.3+meta", + "> 1.2.3+meta", + ">= 1.2.3+meta", + "< 1.2.3+meta", + "<= 1.2.3+meta", + "~ 1.2.3+meta", + "> 0.0.9, <= 2.5.3", + "0.3.0, 0.4.0", + "<= 0.2.0, >= 0.5.0", + "0.1.0, 0.1.4, 0.1.6", + ">=0.5.1-alpha3, <0.6", + "~1", + "~1.2", + "~1.2.2", + "~1.2.3-beta.2", + "^1", + "^1.1", + "^1.1.2", + "^0.1.2", + "^0.5.1-alpha3", + "", + "*", + "x", + "1.*", +]; + +#[test] +fn version_req_round_trips() { + let conn = connection(); + for version_req in VERSION_REQS_TO_TEST { + let version_req = version_req.parse::().unwrap(); + let result = select(version_req.as_sql::()).get_result(&conn); + assert_eq!(Ok(version_req), result); + } +} + +#[test] +fn version_req_inserts_and_loads() { + use self::version_reqs::dsl::*; + + let conn = connection(); + let semver_version_reqs = VERSION_REQS_TO_TEST + .iter() + .enumerate() + .map(|(i, v)| { + ( + format!("VersionReq {}", i), + v.parse::().unwrap(), + ) + }) + .collect::>(); + + let new_version_reqs = semver_version_reqs + .iter() + .map(|&(ref n, ref v)| (name.eq(n), req.eq(v))) + .collect::>(); + let inserted_rows = insert_into(version_reqs) + .values(&new_version_reqs) + .execute(&conn); + assert_eq!(Ok(VERSION_REQS_TO_TEST.len()), inserted_rows); + + let actual_data = version_reqs.load(&conn); + 
assert_eq!(Ok(semver_version_reqs.clone()), actual_data); +} + +#[test] +fn version_req_inserts_and_loads_on_struct() { + #[derive(Debug, PartialEq, Queryable, Insertable)] + #[table_name = "version_reqs"] + struct VersionReqed { + name: String, + req: VersionReq, + } + + let conn = connection(); + let semver_version_reqs = VERSION_REQS_TO_TEST + .iter() + .enumerate() + .map(|(i, v)| VersionReqed { + name: format!("VersionReq {}", i), + req: v.parse::().unwrap(), + }) + .collect::>(); + + let inserted_rows = insert_into(version_reqs::table) + .values(&semver_version_reqs) + .execute(&conn); + assert_eq!(Ok(VERSION_REQS_TO_TEST.len()), inserted_rows); + + let actual_data = version_reqs::table.load(&conn); + assert_eq!(Ok(semver_version_reqs), actual_data); +} diff -Nru cargo-0.44.1/vendor/semver/tests/regression.rs cargo-0.47.0/vendor/semver/tests/regression.rs --- cargo-0.44.1/vendor/semver/tests/regression.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/tests/regression.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -extern crate semver; -extern crate crates_index; -extern crate tempdir; - -// This test checks to see if every existing crate parses successfully. Important to not break the -// Rust universe! - -#[cfg(feature = "ci")] -#[test] -fn test_regressions() { - use tempdir::TempDir; - use crates_index::Index; - use semver::Version; - - let dir = TempDir::new("semver").unwrap(); - let index = Index::new(dir.into_path()); - index.clone().unwrap(); - - for krate in index.crates() { - for version in krate.versions() { - let v = version.version(); - assert!(Version::parse(v).is_ok(), "failed: {} ({})", version.name(), v); - } - } -} diff -Nru cargo-0.44.1/vendor/semver/tests/serde.rs cargo-0.47.0/vendor/semver/tests/serde.rs --- cargo-0.44.1/vendor/semver/tests/serde.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/semver/tests/serde.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,90 +1,93 @@ -#![cfg(feature = "serde")] - -#[macro_use] -extern crate serde_derive; - -extern crate semver; -extern crate serde_json; - -use semver::{Identifier, Version, VersionReq}; - -#[derive(Serialize, Deserialize, PartialEq, Debug)] -struct Identified { - name: String, - identifier: Identifier, -} - -#[derive(Serialize, Deserialize, PartialEq, Debug)] -struct Versioned { - name: String, - vers: Version, -} - -#[test] -fn serialize_identifier() { - let id = Identified { - name: "serde".to_owned(), - identifier: Identifier::Numeric(100), - }; - let j = serde_json::to_string(&id).unwrap(); - assert_eq!(j, r#"{"name":"serde","identifier":100}"#); - - let id = Identified { - name: "serde".to_owned(), - identifier: Identifier::AlphaNumeric("b100".to_owned()), - }; - let j = serde_json::to_string(&id).unwrap(); - assert_eq!(j, r#"{"name":"serde","identifier":"b100"}"#); -} - -#[test] -fn deserialize_identifier() { - let j = r#"{"name":"serde","identifier":100}"#; - let id = serde_json::from_str::(j).unwrap(); - let expected = Identified { - name: "serde".to_owned(), - identifier: Identifier::Numeric(100), - }; - assert_eq!(id, expected); - - let j = r#"{"name":"serde","identifier":"b100"}"#; - let id = serde_json::from_str::(j).unwrap(); - let expected = Identified { - name: "serde".to_owned(), - identifier: Identifier::AlphaNumeric("b100".to_owned()), - }; - assert_eq!(id, expected); -} - -#[test] -fn serialize_version() { - let v = Versioned { - name: "serde".to_owned(), - vers: Version::parse("1.0.0").unwrap(), - }; - let j = serde_json::to_string(&v).unwrap(); - 
assert_eq!(j, r#"{"name":"serde","vers":"1.0.0"}"#); -} - -#[test] -fn deserialize_version() { - let j = r#"{"name":"serde","vers":"1.0.0"}"#; - let v = serde_json::from_str::(j).unwrap(); - let expected = Versioned { - name: "serde".to_owned(), - vers: Version::parse("1.0.0").unwrap(), - }; - assert_eq!(v, expected); -} - -#[test] -fn serialize_versionreq() { - let v = VersionReq::exact(&Version::parse("1.0.0").unwrap()); - - assert_eq!(serde_json::to_string(&v).unwrap(), r#""= 1.0.0""#); -} - -#[test] -fn deserialize_versionreq() { - assert_eq!("1.0.0".parse::().unwrap(), serde_json::from_str(r#""1.0.0""#).unwrap()); -} +#![cfg(feature = "serde")] + +#[macro_use] +extern crate serde_derive; + +extern crate semver; +extern crate serde_json; + +use semver::{Identifier, Version, VersionReq}; + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +struct Identified { + name: String, + identifier: Identifier, +} + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +struct Versioned { + name: String, + vers: Version, +} + +#[test] +fn serialize_identifier() { + let id = Identified { + name: "serde".to_owned(), + identifier: Identifier::Numeric(100), + }; + let j = serde_json::to_string(&id).unwrap(); + assert_eq!(j, r#"{"name":"serde","identifier":100}"#); + + let id = Identified { + name: "serde".to_owned(), + identifier: Identifier::AlphaNumeric("b100".to_owned()), + }; + let j = serde_json::to_string(&id).unwrap(); + assert_eq!(j, r#"{"name":"serde","identifier":"b100"}"#); +} + +#[test] +fn deserialize_identifier() { + let j = r#"{"name":"serde","identifier":100}"#; + let id = serde_json::from_str::(j).unwrap(); + let expected = Identified { + name: "serde".to_owned(), + identifier: Identifier::Numeric(100), + }; + assert_eq!(id, expected); + + let j = r#"{"name":"serde","identifier":"b100"}"#; + let id = serde_json::from_str::(j).unwrap(); + let expected = Identified { + name: "serde".to_owned(), + identifier: Identifier::AlphaNumeric("b100".to_owned()), + }; + assert_eq!(id, expected); +} + +#[test] +fn serialize_version() { + let v = Versioned { + name: "serde".to_owned(), + vers: Version::parse("1.0.0").unwrap(), + }; + let j = serde_json::to_string(&v).unwrap(); + assert_eq!(j, r#"{"name":"serde","vers":"1.0.0"}"#); +} + +#[test] +fn deserialize_version() { + let j = r#"{"name":"serde","vers":"1.0.0"}"#; + let v = serde_json::from_str::(j).unwrap(); + let expected = Versioned { + name: "serde".to_owned(), + vers: Version::parse("1.0.0").unwrap(), + }; + assert_eq!(v, expected); +} + +#[test] +fn serialize_versionreq() { + let v = VersionReq::exact(&Version::parse("1.0.0").unwrap()); + + assert_eq!(serde_json::to_string(&v).unwrap(), r#""= 1.0.0""#); +} + +#[test] +fn deserialize_versionreq() { + assert_eq!( + "1.0.0".parse::().unwrap(), + serde_json::from_str(r#""1.0.0""#).unwrap() + ); +} diff -Nru cargo-0.44.1/vendor/serde/build.rs cargo-0.47.0/vendor/serde/build.rs --- cargo-0.44.1/vendor/serde/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -66,7 +66,7 @@ } // Non-zero integers stabilized in Rust 1.28: - // https://github.com/rust-lang/rust/pull/50808 + // https://blog.rust-lang.org/2018/08/02/Rust-1.28.html#library-stabilizations if minor >= 28 { println!("cargo:rustc-cfg=num_nonzero"); } diff -Nru cargo-0.44.1/vendor/serde/.cargo-checksum.json cargo-0.47.0/vendor/serde/.cargo-checksum.json --- cargo-0.44.1/vendor/serde/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ 
cargo-0.47.0/vendor/serde/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c"} \ No newline at end of file +{"files":{},"package":"96fe57af81d28386a513cbc6858332abc6117cfdb5999647c6444b8f43a370a5"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/serde/Cargo.toml cargo-0.47.0/vendor/serde/Cargo.toml --- cargo-0.44.1/vendor/serde/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,10 +12,10 @@ [package] name = "serde" -version = "1.0.110" +version = "1.0.116" authors = ["Erick Tryzelaar ", "David Tolnay "] build = "build.rs" -include = ["Cargo.toml", "build.rs", "src/**/*.rs", "crates-io.md", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +include = ["build.rs", "src/**/*.rs", "crates-io.md", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] description = "A generic serialization/deserialization framework" homepage = "https://serde.rs" documentation = "https://docs.serde.rs/serde/" @@ -30,7 +30,7 @@ [package.metadata.playground] features = ["derive", "rc"] [dependencies.serde_derive] -version = "=1.0.110" +version = "=1.0.116" optional = true [dev-dependencies.serde_derive] version = "1.0" diff -Nru cargo-0.44.1/vendor/serde/crates-io.md cargo-0.47.0/vendor/serde/crates-io.md --- cargo-0.44.1/vendor/serde/crates-io.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/crates-io.md 2020-10-01 21:38:28.000000000 +0000 @@ -43,10 +43,20 @@ ## Getting help -Serde developers live in the #serde channel on [`irc.mozilla.org`][irc]. The -\#rust channel is also a good resource with generally faster response time but -less specific knowledge about Serde. If IRC is not your thing or you don't get a -good response, we are happy to respond to [GitHub issues][issues] as well. +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#general] or [#beginners] channels of the unofficial community Discord, the +[#rust-usage] channel of the official Rust Project Discord, or the +[#general][zulip] stream in Zulip. For asynchronous, consider the [\[rust\] tag +on StackOverflow][stackoverflow], the [/r/rust] subreddit which has a pinned +weekly easy questions post, or the Rust [Discourse forum][discourse]. It's +acceptable to file a support issue in this repo but they tend not to get as many +eyes as any of the above and may get closed without a response after some time. -[irc]: https://wiki.mozilla.org/IRC -[issues]: https://github.com/serde-rs/serde/issues/new/choose +[#general]: https://discord.com/channels/273534239310479360/274215136414400513 +[#beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org diff -Nru cargo-0.44.1/vendor/serde/README.md cargo-0.47.0/vendor/serde/README.md --- cargo-0.44.1/vendor/serde/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -75,13 +75,23 @@ ## Getting help -Serde developers live in the #serde channel on [`irc.mozilla.org`][irc]. 
The -\#rust channel is also a good resource with generally faster response time but -less specific knowledge about Serde. If IRC is not your thing or you don't get a -good response, we are happy to respond to [GitHub issues][issues] as well. +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#general] or [#beginners] channels of the unofficial community Discord, the +[#rust-usage] channel of the official Rust Project Discord, or the +[#general][zulip] stream in Zulip. For asynchronous, consider the [\[rust\] tag +on StackOverflow][stackoverflow], the [/r/rust] subreddit which has a pinned +weekly easy questions post, or the Rust [Discourse forum][discourse]. It's +acceptable to file a support issue in this repo but they tend not to get as many +eyes as any of the above and may get closed without a response after some time. -[irc]: https://wiki.mozilla.org/IRC -[issues]: https://github.com/serde-rs/serde/issues/new/choose +[#general]: https://discord.com/channels/273534239310479360/274215136414400513 +[#beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org
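Aside on the build-script pattern visible in the serde `build.rs` hunk above: the hunk itself only swaps a comment URL, but the surrounding context shows the mechanism serde relies on, probing `rustc --version` for the minor version and printing `cargo:rustc-cfg=num_nonzero` on 1.28+ so that library code can gate items on that cfg. A minimal sketch of the consuming side, assuming a made-up `smallest_nonzero` function rather than serde's real source:

```rust
// Illustrative only: gate an item on a cfg emitted by a build script.
// `num_nonzero` is the cfg name serde's build.rs prints for rustc >= 1.28;
// the functions below are placeholders, not serde's actual code.
#[cfg(num_nonzero)]
fn smallest_nonzero() -> std::num::NonZeroU32 {
    std::num::NonZeroU32::new(1).expect("1 is non-zero")
}

#[cfg(not(num_nonzero))]
fn smallest_nonzero() -> u32 {
    1 // fallback shape when the cfg was not emitted (compiler older than 1.28)
}

fn main() {
    // Whichever variant was compiled in is the only one that exists here.
    let _ = smallest_nonzero();
}
```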
diff -Nru cargo-0.44.1/vendor/serde/src/de/impls.rs cargo-0.47.0/vendor/serde/src/de/impls.rs --- cargo-0.44.1/vendor/serde/src/de/impls.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/src/de/impls.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1313,7 +1313,7 @@ formatter.write_str($expecting_message) } - fn visit_u32(self, value: u32) -> Result + fn visit_u64(self, value: u64) -> Result where E: Error, { @@ -1321,7 +1321,7 @@ $( $index => Ok($name_kind :: $variant), )* - _ => Err(Error::invalid_value(Unexpected::Unsigned(value as u64), &self),), + _ => Err(Error::invalid_value(Unexpected::Unsigned(value), &self),), } } @@ -2326,7 +2326,7 @@ formatter.write_str("`Unbounded`, `Included` or `Excluded`") } - fn visit_u32(self, value: u32) -> Result + fn visit_u64(self, value: u64) -> Result where E: Error, { @@ -2335,7 +2335,7 @@ 1 => Ok(Field::Included), 2 => Ok(Field::Excluded), _ => Err(Error::invalid_value( - Unexpected::Unsigned(value as u64), + Unexpected::Unsigned(value), &self, )), } @@ -2492,7 +2492,7 @@ formatter.write_str("`Ok` or `Err`") } - fn visit_u32(self, value: u32) -> Result + fn visit_u64(self, value: u64) -> Result where E: Error, { @@ -2500,7 +2500,7 @@ 0 => Ok(Field::Ok), 1 => Ok(Field::Err), _ => Err(Error::invalid_value( - Unexpected::Unsigned(value as u64), + Unexpected::Unsigned(value), &self, )), } diff -Nru cargo-0.44.1/vendor/serde/src/de/mod.rs cargo-0.47.0/vendor/serde/src/de/mod.rs --- cargo-0.44.1/vendor/serde/src/de/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/src/de/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -104,7 +104,7 @@ //! [`Deserialize`]: ../trait.Deserialize.html //! [`Deserializer`]: ../trait.Deserializer.html //! [`LinkedHashMap`]: https://docs.rs/linked-hash-map/*/linked_hash_map/struct.LinkedHashMap.html -//! [`bincode`]: https://github.com/TyOverby/bincode +//! [`bincode`]: https://github.com/servo/bincode //! [`linked-hash-map`]: https://crates.io/crates/linked-hash-map //! [`serde_derive`]: https://crates.io/crates/serde_derive //! [`serde_json`]: https://github.com/serde-rs/json diff -Nru cargo-0.44.1/vendor/serde/src/lib.rs cargo-0.47.0/vendor/serde/src/lib.rs --- cargo-0.44.1/vendor/serde/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -37,8 +37,8 @@ //! used for IPC within the Servo rendering engine. //! - [CBOR], a Concise Binary Object Representation designed for small message //! size without the need for version negotiation. -//! - [YAML], a popular human-friendly configuration language that ain't markup -//! language. +//! - [YAML], a self-proclaimed human-friendly configuration language that ain't +//! markup language. //! - [MessagePack], an efficient binary format that resembles a compact JSON. //! - [TOML], a minimal configuration format used by [Cargo]. //! - [Pickle], a format common in the Python world. @@ -48,14 +48,18 @@ //! definition. //! - [JSON5], A superset of JSON including some productions from ES5. //! - [Postcard], a no\_std and embedded-systems friendly compact binary format. -//! - [URL], the x-www-form-urlencoded format. +//! - [URL] query strings, in the x-www-form-urlencoded format. //! - [Envy], a way to deserialize environment variables into Rust structs. //! *(deserialization only)* //! - [Envy Store], a way to deserialize [AWS Parameter Store] parameters into //! Rust structs. *(deserialization only)* +//! 
- [S-expressions], the textual representation of code and data used by the +//! Lisp language family. +//! - [D-Bus]'s binary wire format. +//! - [FlexBuffers], the schemaless cousin of Google's FlatBuffers zero-copy serialization format. //! //! [JSON]: https://github.com/serde-rs/json -//! [Bincode]: https://github.com/TyOverby/bincode +//! [Bincode]: https://github.com/servo/bincode //! [CBOR]: https://github.com/pyfisch/cbor //! [YAML]: https://github.com/dtolnay/serde-yaml //! [MessagePack]: https://github.com/3Hren/msgpack-rust @@ -66,23 +70,26 @@ //! [Avro]: https://github.com/flavray/avro-rs //! [JSON5]: https://github.com/callum-oakley/json5-rs //! [Postcard]: https://github.com/jamesmunns/postcard -//! [URL]: https://github.com/nox/serde_urlencoded +//! [URL]: https://docs.rs/serde_qs //! [Envy]: https://github.com/softprops/envy //! [Envy Store]: https://github.com/softprops/envy-store //! [Cargo]: http://doc.crates.io/manifest.html //! [AWS Parameter Store]: https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-paramstore.html +//! [S-expressions]: https://github.com/rotty/lexpr-rs +//! [D-Bus]: https://docs.rs/zvariant +//! [FlexBuffers]: https://github.com/google/flatbuffers/tree/master/rust/flexbuffers //////////////////////////////////////////////////////////////////////////////// // Serde types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/serde/1.0.110")] +#![doc(html_root_url = "https://docs.rs/serde/1.0.116")] // Support using Serde without the standard library! #![cfg_attr(not(feature = "std"), no_std)] // Unstable functionality only if the user asks for it. For tracking and // discussion of these features please refer to this issue: // // https://github.com/serde-rs/serde/issues/812 -#![cfg_attr(feature = "unstable", feature(specialization, never_type))] +#![cfg_attr(feature = "unstable", feature(never_type))] #![allow(unknown_lints, bare_trait_objects, deprecated)] #![cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))] #![cfg_attr(feature = "cargo-clippy", deny(clippy, clippy_pedantic))] @@ -90,6 +97,8 @@ #![cfg_attr( feature = "cargo-clippy", allow( + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 + unnested_or_patterns, // not available in our oldest supported compiler checked_conversions, empty_enum, @@ -102,6 +111,7 @@ // things are often more readable this way cast_lossless, module_name_repetitions, + option_if_let_else, single_match_else, type_complexity, use_self, diff -Nru cargo-0.44.1/vendor/serde/src/private/de.rs cargo-0.47.0/vendor/serde/src/private/de.rs --- cargo-0.44.1/vendor/serde/src/private/de.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/src/private/de.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1562,7 +1562,7 @@ other.unexpected(), &"struct variant", )), - _ => Err(de::Error::invalid_type( + None => Err(de::Error::invalid_type( de::Unexpected::UnitVariant, &"struct variant", )), @@ -2252,7 +2252,7 @@ other.unexpected(), &"struct variant", )), - _ => Err(de::Error::invalid_type( + None => Err(de::Error::invalid_type( de::Unexpected::UnitVariant, &"struct variant", )), @@ -2763,6 +2763,13 @@ } } + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + forward_to_deserialize_other! 
{ deserialize_bool() deserialize_i8() @@ -2780,7 +2787,6 @@ deserialize_string() deserialize_bytes() deserialize_byte_buf() - deserialize_unit() deserialize_unit_struct(&'static str) deserialize_seq() deserialize_tuple(usize) diff -Nru cargo-0.44.1/vendor/serde/src/private/ser.rs cargo-0.47.0/vendor/serde/src/private/ser.rs --- cargo-0.44.1/vendor/serde/src/private/ser.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/src/private/ser.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1124,7 +1124,7 @@ } fn serialize_unit(self) -> Result { - Err(Self::bad_type(Unsupported::Unit)) + Ok(()) } fn serialize_unit_struct(self, _: &'static str) -> Result { @@ -1243,6 +1243,18 @@ self.0.serialize_value(value) } + fn serialize_entry( + &mut self, + key: &K, + value: &V, + ) -> Result<(), Self::Error> + where + K: Serialize, + V: Serialize, + { + self.0.serialize_entry(key, value) + } + fn end(self) -> Result<(), Self::Error> { Ok(()) } diff -Nru cargo-0.44.1/vendor/serde/src/ser/mod.rs cargo-0.47.0/vendor/serde/src/ser/mod.rs --- cargo-0.44.1/vendor/serde/src/ser/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde/src/ser/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -99,7 +99,7 @@ //! [`LinkedHashMap`]: https://docs.rs/linked-hash-map/*/linked_hash_map/struct.LinkedHashMap.html //! [`Serialize`]: ../trait.Serialize.html //! [`Serializer`]: ../trait.Serializer.html -//! [`bincode`]: https://github.com/TyOverby/bincode +//! [`bincode`]: https://github.com/servo/bincode //! [`linked-hash-map`]: https://crates.io/crates/linked-hash-map //! [`serde_derive`]: https://crates.io/crates/serde_derive //! [`serde_json`]: https://github.com/serde-rs/json @@ -1278,7 +1278,7 @@ ::Item: Serialize, { let iter = iter.into_iter(); - let mut serializer = try!(self.serialize_seq(iter.len_hint())); + let mut serializer = try!(self.serialize_seq(iterator_len_hint(&iter))); for item in iter { try!(serializer.serialize_element(&item)); } @@ -1318,7 +1318,7 @@ I: IntoIterator, { let iter = iter.into_iter(); - let mut serializer = try!(self.serialize_map(iter.len_hint())); + let mut serializer = try!(self.serialize_map(iterator_len_hint(&iter))); for (key, value) in iter { try!(serializer.serialize_entry(&key, &value)); } @@ -1953,35 +1953,6 @@ fn end(self) -> Result; } -trait LenHint: Iterator { - fn len_hint(&self) -> Option; -} - -impl LenHint for I -where - I: Iterator, -{ - #[cfg(not(feature = "unstable"))] - fn len_hint(&self) -> Option { - iterator_len_hint(self) - } - - #[cfg(feature = "unstable")] - default fn len_hint(&self) -> Option { - iterator_len_hint(self) - } -} - -#[cfg(feature = "unstable")] -impl LenHint for I -where - I: ExactSizeIterator, -{ - fn len_hint(&self) -> Option { - Some(self.len()) - } -} - fn iterator_len_hint(iter: &I) -> Option where I: Iterator, diff -Nru cargo-0.44.1/vendor/serde_derive/build.rs cargo-0.47.0/vendor/serde_derive/build.rs --- cargo-0.44.1/vendor/serde_derive/build.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,30 @@ +use std::env; +use std::process::Command; +use std::str; + +// The rustc-cfg strings below are *not* public API. Please let us know by +// opening a GitHub issue if your build environment requires some way to enable +// these cfgs other than by executing our build script. 
+fn main() { + let minor = match rustc_minor_version() { + Some(minor) => minor, + None => return, + }; + + // Underscore const names stabilized in Rust 1.37: + // https://blog.rust-lang.org/2019/08/15/Rust-1.37.0.html#using-unnamed-const-items-for-macros + if minor >= 37 { + println!("cargo:rustc-cfg=underscore_consts"); + } +} + +fn rustc_minor_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + pieces.next()?.parse().ok() +} diff -Nru cargo-0.44.1/vendor/serde_derive/.cargo-checksum.json cargo-0.47.0/vendor/serde_derive/.cargo-checksum.json --- cargo-0.44.1/vendor/serde_derive/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984"} \ No newline at end of file +{"files":{},"package":"f630a6370fd8e457873b4bd2ffdae75408bc291ba72be773772a4c2a065d9ae8"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/serde_derive/Cargo.toml cargo-0.47.0/vendor/serde_derive/Cargo.toml --- cargo-0.44.1/vendor/serde_derive/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,9 +12,9 @@ [package] name = "serde_derive" -version = "1.0.110" +version = "1.0.116" authors = ["Erick Tryzelaar ", "David Tolnay "] -include = ["Cargo.toml", "src/**/*.rs", "crates-io.md", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +include = ["build.rs", "src/**/*.rs", "crates-io.md", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] description = "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]" homepage = "https://serde.rs" documentation = "https://serde.rs/derive.html" @@ -35,7 +35,7 @@ version = "1.0" [dependencies.syn] -version = "1.0" +version = "1.0.33" features = ["visit"] [dev-dependencies.serde] version = "1.0" diff -Nru cargo-0.44.1/vendor/serde_derive/crates-io.md cargo-0.47.0/vendor/serde_derive/crates-io.md --- cargo-0.44.1/vendor/serde_derive/crates-io.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/crates-io.md 2020-10-01 21:38:28.000000000 +0000 @@ -43,10 +43,20 @@ ## Getting help -Serde developers live in the #serde channel on [`irc.mozilla.org`][irc]. The -\#rust channel is also a good resource with generally faster response time but -less specific knowledge about Serde. If IRC is not your thing or you don't get a -good response, we are happy to respond to [GitHub issues][issues] as well. +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#general] or [#beginners] channels of the unofficial community Discord, the +[#rust-usage] channel of the official Rust Project Discord, or the +[#general][zulip] stream in Zulip. For asynchronous, consider the [\[rust\] tag +on StackOverflow][stackoverflow], the [/r/rust] subreddit which has a pinned +weekly easy questions post, or the Rust [Discourse forum][discourse]. It's +acceptable to file a support issue in this repo but they tend not to get as many +eyes as any of the above and may get closed without a response after some time. 
-[irc]: https://wiki.mozilla.org/IRC -[issues]: https://github.com/serde-rs/serde/issues/new/choose +[#general]: https://discord.com/channels/273534239310479360/274215136414400513 +[#beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org diff -Nru cargo-0.44.1/vendor/serde_derive/README.md cargo-0.47.0/vendor/serde_derive/README.md --- cargo-0.44.1/vendor/serde_derive/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -75,13 +75,23 @@ ## Getting help -Serde developers live in the #serde channel on [`irc.mozilla.org`][irc]. The -\#rust channel is also a good resource with generally faster response time but -less specific knowledge about Serde. If IRC is not your thing or you don't get a -good response, we are happy to respond to [GitHub issues][issues] as well. +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#general] or [#beginners] channels of the unofficial community Discord, the +[#rust-usage] channel of the official Rust Project Discord, or the +[#general][zulip] stream in Zulip. For asynchronous, consider the [\[rust\] tag +on StackOverflow][stackoverflow], the [/r/rust] subreddit which has a pinned +weekly easy questions post, or the Rust [Discourse forum][discourse]. It's +acceptable to file a support issue in this repo but they tend not to get as many +eyes as any of the above and may get closed without a response after some time. -[irc]: https://wiki.mozilla.org/IRC -[issues]: https://github.com/serde-rs/serde/issues/new/choose +[#general]: https://discord.com/channels/273534239310479360/274215136414400513 +[#beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org
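The new serde_derive `build.rs` shown above applies the same probe to emit `cargo:rustc-cfg=underscore_consts` on rustc 1.37+, and the `dummy.rs` hunk below switches the name of the wrapper const around the generated impls based on that cfg. A hand-written approximation of the two wrapper shapes, with placeholder types and inherent impls standing in for the real generated trait impls:

```rust
// Approximation of the const wrapper a derive macro emits; `New`, `Old`
// and the `generated` functions are placeholders, not serde_derive output.
struct New;
struct Old;

// rustc >= 1.37 (cfg `underscore_consts`): an unnamed const, so no unique
// identifier has to be synthesized per trait/type pair.
const _: () = {
    impl New {
        fn generated() {}
    }
};

// Older compilers: a uniquely named const derived from the trait and type,
// in the style of `format_ident!("_IMPL_{}_FOR_{}", ...)` in dummy.rs below.
#[allow(non_upper_case_globals)]
const _IMPL_SERIALIZE_FOR_Old: () = {
    impl Old {
        fn generated() {}
    }
};

fn main() {
    // Items defined inside either const block are visible outside it.
    New::generated();
    Old::generated();
}
```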
diff -Nru cargo-0.44.1/vendor/serde_derive/src/bound.rs cargo-0.47.0/vendor/serde_derive/src/bound.rs --- cargo-0.44.1/vendor/serde_derive/src/bound.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/bound.rs 2020-10-01 21:38:28.000000000 +0000 @@ -5,7 +5,7 @@ use syn::visit::{self, Visit}; use internals::ast::{Container, Data}; -use internals::attr; +use internals::{attr, ungroup}; use proc_macro2::Span; @@ -114,7 +114,7 @@ } impl<'ast> Visit<'ast> for FindTyParams<'ast> { fn visit_field(&mut self, field: &'ast syn::Field) { - if let syn::Type::Path(ty) = &field.ty { + if let syn::Type::Path(ty) = ungroup(&field.ty) { if let Some(Pair::Punctuated(t, _)) = ty.path.segments.pairs().next() { if self.all_type_params.contains(&t.ident) { self.associated_type_usage.push(ty); diff -Nru cargo-0.44.1/vendor/serde_derive/src/de.rs cargo-0.47.0/vendor/serde_derive/src/de.rs --- cargo-0.44.1/vendor/serde_derive/src/de.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/de.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,7 +8,7 @@ use dummy; use fragment::{Expr, Fragment, Match, Stmts}; use internals::ast::{Container, Data, Field, Style, Variant}; -use internals::{attr, Ctxt, Derive}; +use internals::{attr, ungroup, Ctxt, Derive}; use pretend; use std::collections::BTreeSet; @@ -77,7 +77,7 @@ fn precondition_sized(cx: &Ctxt, cont: &Container) { if let Data::Struct(_, fields) = &cont.data { if let Some(last) = fields.last() { - if let syn::Type::Slice(_) = *last.ty { + if let syn::Type::Slice(_) = ungroup(last.ty) { cx.error_spanned_by( cont.original, "cannot deserialize a dynamically sized struct", diff -Nru cargo-0.44.1/vendor/serde_derive/src/dummy.rs cargo-0.47.0/vendor/serde_derive/src/dummy.rs --- cargo-0.44.1/vendor/serde_derive/src/dummy.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/dummy.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,5 @@ -use proc_macro2::{Ident, Span, TokenStream}; +use proc_macro2::{Ident, TokenStream}; +use quote::format_ident; use syn; use try; @@ -11,10 +12,11 @@ ) -> TokenStream { let try_replacement = try::replacement(); - let dummy_const = Ident::new( - &format!("_IMPL_{}_FOR_{}", trait_, unraw(ty)), - Span::call_site(), - ); + let dummy_const = if cfg!(underscore_consts) { + format_ident!("_") + } else { + format_ident!("_IMPL_{}_FOR_{}", trait_, unraw(ty)) + }; let use_serde = match serde_path { Some(path) => quote! { diff -Nru cargo-0.44.1/vendor/serde_derive/src/internals/attr.rs cargo-0.47.0/vendor/serde_derive/src/internals/attr.rs --- cargo-0.44.1/vendor/serde_derive/src/internals/attr.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/internals/attr.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,5 +1,5 @@ use internals::symbol::*; -use internals::Ctxt; +use internals::{ungroup, Ctxt}; use proc_macro2::{Group, Span, TokenStream, TokenTree}; use quote::ToTokens; use std::borrow::Cow; @@ -597,7 +597,11 @@ for attr in &item.attrs { if attr.path.is_ident("repr") { let _ = attr.parse_args_with(|input: ParseStream| { - is_packed |= input.parse::()? == "packed"; + while let Some(token) = input.parse()? 
{ + if let TokenTree::Ident(ident) = token { + is_packed |= ident == "packed"; + } + } Ok(()) }); } @@ -1737,7 +1741,7 @@ // cow: Cow<'a, str>, // } fn is_cow(ty: &syn::Type, elem: fn(&syn::Type) -> bool) -> bool { - let path = match ty { + let path = match ungroup(ty) { syn::Type::Path(ty) => &ty.path, _ => { return false; @@ -1764,7 +1768,7 @@ } fn is_option(ty: &syn::Type, elem: fn(&syn::Type) -> bool) -> bool { - let path = match ty { + let path = match ungroup(ty) { syn::Type::Path(ty) => &ty.path, _ => { return false; @@ -1811,7 +1815,7 @@ // r: &'a str, // } fn is_reference(ty: &syn::Type, elem: fn(&syn::Type) -> bool) -> bool { - match ty { + match ungroup(ty) { syn::Type::Reference(ty) => ty.mutability.is_none() && elem(&ty.elem), _ => false, } @@ -1822,14 +1826,14 @@ } fn is_slice_u8(ty: &syn::Type) -> bool { - match ty { + match ungroup(ty) { syn::Type::Slice(ty) => is_primitive_type(&ty.elem, "u8"), _ => false, } } fn is_primitive_type(ty: &syn::Type, primitive: &str) -> bool { - match ty { + match ungroup(ty) { syn::Type::Path(ty) => ty.qself.is_none() && is_primitive_path(&ty.path, primitive), _ => false, } diff -Nru cargo-0.44.1/vendor/serde_derive/src/internals/check.rs cargo-0.47.0/vendor/serde_derive/src/internals/check.rs --- cargo-0.44.1/vendor/serde_derive/src/internals/check.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/internals/check.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,6 +1,6 @@ use internals::ast::{Container, Data, Field, Style}; use internals::attr::{Identifier, TagType}; -use internals::{Ctxt, Derive}; +use internals::{ungroup, Ctxt, Derive}; use syn::{Member, Type}; /// Cross-cutting checks that require looking at more than a single attrs @@ -396,7 +396,7 @@ } fn allow_transparent(field: &Field, derive: Derive) -> bool { - if let Type::Path(ty) = field.ty { + if let Type::Path(ty) = ungroup(&field.ty) { if let Some(seg) = ty.path.segments.last() { if seg.ident == "PhantomData" { return false; diff -Nru cargo-0.44.1/vendor/serde_derive/src/internals/mod.rs cargo-0.47.0/vendor/serde_derive/src/internals/mod.rs --- cargo-0.44.1/vendor/serde_derive/src/internals/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/internals/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,8 +8,17 @@ mod check; mod symbol; +use syn::Type; + #[derive(Copy, Clone)] pub enum Derive { Serialize, Deserialize, } + +pub fn ungroup(mut ty: &Type) -> &Type { + while let Type::Group(group) = ty { + ty = &group.elem; + } + ty +} diff -Nru cargo-0.44.1/vendor/serde_derive/src/lib.rs cargo-0.47.0/vendor/serde_derive/src/lib.rs --- cargo-0.44.1/vendor/serde_derive/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_derive/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -13,18 +13,21 @@ //! //! 
[https://serde.rs/derive.html]: https://serde.rs/derive.html -#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.110")] +#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.116")] #![allow(unknown_lints, bare_trait_objects)] #![deny(clippy::all, clippy::pedantic)] // Ignored clippy lints #![allow( clippy::cognitive_complexity, clippy::enum_variant_names, + clippy::match_like_matches_macro, clippy::needless_pass_by_value, clippy::too_many_arguments, clippy::trivially_copy_pass_by_ref, clippy::used_underscore_binding, - clippy::wildcard_in_or_patterns + clippy::wildcard_in_or_patterns, + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 + clippy::unnested_or_patterns, )] // Ignored clippy_pedantic lints #![allow( @@ -38,6 +41,7 @@ clippy::match_same_arms, clippy::module_name_repetitions, clippy::must_use_candidate, + clippy::option_if_let_else, clippy::similar_names, clippy::single_match_else, clippy::struct_excessive_bools, @@ -46,8 +50,6 @@ clippy::use_self, clippy::wildcard_imports )] -// The `quote!` macro requires deep recursion. -#![recursion_limit = "512"] #[macro_use] extern crate quote; diff -Nru cargo-0.44.1/vendor/serde_ignored/.cargo-checksum.json cargo-0.47.0/vendor/serde_ignored/.cargo-checksum.json --- cargo-0.44.1/vendor/serde_ignored/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_ignored/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"7248fdcbd17d3f2604fc2a02d0ecc844d9a7bf52bf95fc196d9f0a38f6da6a0e"} \ No newline at end of file +{"files":{},"package":"1c2c7d39d14f2f2ea82239de71594782f186fd03501ac81f0ce08e674819ff2f"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/serde_ignored/Cargo.toml cargo-0.47.0/vendor/serde_ignored/Cargo.toml --- cargo-0.44.1/vendor/serde_ignored/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_ignored/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "serde_ignored" -version = "0.1.1" +version = "0.1.2" authors = ["David Tolnay "] description = "Find out about keys that are ignored when deserializing data" readme = "README.md" @@ -21,6 +21,8 @@ categories = ["encoding"] license = "MIT OR Apache-2.0" repository = "https://github.com/dtolnay/serde-ignored" +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] [dependencies.serde] version = "1.0" [dev-dependencies.serde_derive] @@ -28,5 +30,3 @@ [dev-dependencies.serde_json] version = "1.0" -[badges.travis-ci] -repository = "dtolnay/serde-ignored" diff -Nru cargo-0.44.1/vendor/serde_ignored/README.md cargo-0.47.0/vendor/serde_ignored/README.md --- cargo-0.44.1/vendor/serde_ignored/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_ignored/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,8 +1,9 @@ # Serde ignored -[![Build Status](https://api.travis-ci.org/dtolnay/serde-ignored.svg?branch=master)](https://travis-ci.org/dtolnay/serde-ignored) -[![Latest Version](https://img.shields.io/crates/v/serde-ignored.svg)](https://crates.io/crates/serde-ignored) -[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/serde_ignored) +[github](https://github.com/dtolnay/serde-ignored) +[crates.io](https://crates.io/crates/serde_ignored) +[docs.rs](https://docs.rs/serde_ignored) +[build status](https://github.com/dtolnay/serde-ignored/actions?query=branch%3Amaster) Find out about keys that are ignored when deserializing data. 
This crate provides a wrapper that works with any existing Serde `Deserializer` and invokes diff -Nru cargo-0.44.1/vendor/serde_ignored/src/lib.rs cargo-0.47.0/vendor/serde_ignored/src/lib.rs --- cargo-0.44.1/vendor/serde_ignored/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_ignored/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,3 +1,11 @@ +//! [![github]](https://github.com/dtolnay/serde-ignored) [![crates-io]](https://crates.io/crates/serde_ignored) [![docs-rs]](https://docs.rs/serde_ignored) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K +//! +//!
+//! //! Find out about keys that are ignored when deserializing data. This crate //! provides a wrapper that works with any existing Serde `Deserializer` and //! invokes a callback on every ignored field. @@ -77,7 +85,7 @@ //! # fn main() { try_main().unwrap() } //! ``` -#![doc(html_root_url = "https://docs.rs/serde_ignored/0.1.1")] +#![doc(html_root_url = "https://docs.rs/serde_ignored/0.1.2")] use serde::de::{self, Deserialize, DeserializeSeed, Visitor}; use std::fmt::{self, Display}; diff -Nru cargo-0.44.1/vendor/serde_json/build.rs cargo-0.47.0/vendor/serde_json/build.rs --- cargo-0.44.1/vendor/serde_json/build.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,46 @@ +use std::env; +use std::process::Command; +use std::str::{self, FromStr}; + +fn main() { + // Decide ideal limb width for arithmetic in the float parser. Refer to + // src/lexical/math.rs for where this has an effect. + let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap(); + match target_arch.as_str() { + "aarch64" | "mips64" | "powerpc64" | "x86_64" => { + println!("cargo:rustc-cfg=limb_width_64"); + } + _ => { + println!("cargo:rustc-cfg=limb_width_32"); + } + } + + let minor = match rustc_minor_version() { + Some(minor) => minor, + None => return, + }; + + // BTreeMap::get_key_value + // https://blog.rust-lang.org/2019/12/19/Rust-1.40.0.html#additions-to-the-standard-library + if minor < 40 { + println!("cargo:rustc-cfg=no_btreemap_get_key_value"); + } + + // BTreeMap::remove_entry + // https://blog.rust-lang.org/2020/07/16/Rust-1.45.0.html#library-changes + if minor < 45 { + println!("cargo:rustc-cfg=no_btreemap_remove_entry"); + } +} + +fn rustc_minor_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + let next = pieces.next()?; + u32::from_str(next).ok() +} diff -Nru cargo-0.44.1/vendor/serde_json/.cargo-checksum.json cargo-0.47.0/vendor/serde_json/.cargo-checksum.json --- cargo-0.44.1/vendor/serde_json/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2"} \ No newline at end of file +{"files":{},"package":"a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/serde_json/Cargo.toml cargo-0.47.0/vendor/serde_json/Cargo.toml --- cargo-0.44.1/vendor/serde_json/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,11 +13,11 @@ [package] edition = "2018" name = "serde_json" -version = "1.0.53" +version = "1.0.58" authors = ["Erick Tryzelaar ", "David Tolnay "] -include = ["Cargo.toml", "src/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +include = ["build.rs", "src/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] description = "A JSON serialization file format" -documentation = "http://docs.serde.rs/serde_json/" +documentation = "https://docs.serde.rs/serde_json/" readme = "README.md" keywords = ["json", "serde", "serialization"] categories = ["encoding"] @@ -30,7 +30,7 @@ [package.metadata.playground] features = ["raw_value"] [dependencies.indexmap] 
-version = "1.2" +version = "1.5" optional = true [dependencies.itoa] @@ -44,7 +44,7 @@ version = "1.0.100" default-features = false [dev-dependencies.automod] -version = "0.1" +version = "1.0" [dev-dependencies.rustversion] version = "1.0" @@ -66,6 +66,7 @@ alloc = ["serde/alloc"] arbitrary_precision = [] default = ["std"] +float_roundtrip = [] preserve_order = ["indexmap"] raw_value = [] std = ["serde/std"] diff -Nru cargo-0.44.1/vendor/serde_json/README.md cargo-0.47.0/vendor/serde_json/README.md --- cargo-0.44.1/vendor/serde_json/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -196,6 +196,11 @@ when we write `p.phones[0]`, then `p.phones` is guaranteed to be a `Vec` so indexing into it makes sense and produces a `String`. +The necessary setup for using Serde's derive macros is explained on the *[Using +derive]* page of the Serde site. + +[Using derive]: https://serde.rs/derive.html + ## Constructing JSON values Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value` @@ -313,11 +318,23 @@ ## Getting help -Serde developers live in the #serde channel on -[`irc.mozilla.org`](https://wiki.mozilla.org/IRC). The #rust channel is also a -good resource with generally faster response time but less specific knowledge -about Serde. If IRC is not your thing, we are happy to respond to [GitHub -issues](https://github.com/serde-rs/json/issues/new) as well. +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#general] or [#beginners] channels of the unofficial community Discord, the +[#rust-usage] channel of the official Rust Project Discord, or the +[#general][zulip] stream in Zulip. For asynchronous, consider the [\[rust\] tag +on StackOverflow][stackoverflow], the [/r/rust] subreddit which has a pinned +weekly easy questions post, or the Rust [Discourse forum][discourse]. It's +acceptable to file a support issue in this repo but they tend not to get as many +eyes as any of the above and may get closed without a response after some time. + +[#general]: https://discord.com/channels/273534239310479360/274215136414400513 +[#beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org ## No-std support diff -Nru cargo-0.44.1/vendor/serde_json/src/de.rs cargo-0.47.0/vendor/serde_json/src/de.rs --- cargo-0.44.1/vendor/serde_json/src/de.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/de.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,6 +1,8 @@ //! Deserialize JSON data to a Rust data structure. 
use crate::error::{Error, ErrorCode, Result}; +#[cfg(feature = "float_roundtrip")] +use crate::lexical; use crate::lib::str::FromStr; use crate::lib::*; use crate::number::Number; @@ -23,6 +25,8 @@ read: R, scratch: Vec, remaining_depth: u8, + #[cfg(feature = "float_roundtrip")] + single_precision: bool, #[cfg(feature = "unbounded_depth")] disable_recursion_limit: bool, } @@ -40,23 +44,14 @@ /// - Deserializer::from_bytes /// - Deserializer::from_reader pub fn new(read: R) -> Self { - #[cfg(not(feature = "unbounded_depth"))] - { - Deserializer { - read: read, - scratch: Vec::new(), - remaining_depth: 128, - } - } - - #[cfg(feature = "unbounded_depth")] - { - Deserializer { - read: read, - scratch: Vec::new(), - remaining_depth: 128, - disable_recursion_limit: false, - } + Deserializer { + read, + scratch: Vec::new(), + remaining_depth: 128, + #[cfg(feature = "float_roundtrip")] + single_precision: false, + #[cfg(feature = "unbounded_depth")] + disable_recursion_limit: false, } } } @@ -150,7 +145,7 @@ let offset = self.read.byte_offset(); StreamDeserializer { de: self, - offset: offset, + offset, failed: false, output: PhantomData, lifetime: PhantomData, @@ -305,7 +300,7 @@ self.fix_position(err) } - fn deserialize_prim_number(&mut self, visitor: V) -> Result + fn deserialize_number(&mut self, visitor: V) -> Result where V: de::Visitor<'de>, { @@ -398,29 +393,28 @@ } } c @ b'1'..=b'9' => { - let mut res = (c - b'0') as u64; + let mut significand = (c - b'0') as u64; loop { match tri!(self.peek_or_null()) { c @ b'0'..=b'9' => { - self.eat_char(); let digit = (c - b'0') as u64; - // We need to be careful with overflow. If we can, try to keep the - // number as a `u64` until we grow too large. At that point, switch to - // parsing the value as a `f64`. - if overflow!(res * 10 + digit, u64::max_value()) { - return Ok(ParserNumber::F64(tri!(self.parse_long_integer( - positive, - res, - 1, // res * 10^1 - )))); + // We need to be careful with overflow. If we can, + // try to keep the number as a `u64` until we grow + // too large. At that point, switch to parsing the + // value as a `f64`. + if overflow!(significand * 10 + digit, u64::max_value()) { + return Ok(ParserNumber::F64(tri!( + self.parse_long_integer(positive, significand), + ))); } - res = res * 10 + digit; + self.eat_char(); + significand = significand * 10 + digit; } _ => { - return self.parse_number(positive, res); + return self.parse_number(positive, significand); } } } @@ -429,33 +423,6 @@ } } - fn parse_long_integer( - &mut self, - positive: bool, - significand: u64, - mut exponent: i32, - ) -> Result { - loop { - match tri!(self.peek_or_null()) { - b'0'..=b'9' => { - self.eat_char(); - // This could overflow... if your integer is gigabytes long. - // Ignore that possibility. - exponent += 1; - } - b'.' => { - return self.parse_decimal(positive, significand, exponent); - } - b'e' | b'E' => { - return self.parse_exponent(positive, significand, exponent); - } - _ => { - return self.f64_from_parts(positive, significand, exponent); - } - } - } - } - fn parse_number(&mut self, positive: bool, significand: u64) -> Result { Ok(match tri!(self.peek_or_null()) { b'.' 
=> ParserNumber::F64(tri!(self.parse_decimal(positive, significand, 0))), @@ -485,26 +452,20 @@ ) -> Result { self.eat_char(); - let mut at_least_one_digit = false; while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); let digit = (c - b'0') as u64; - at_least_one_digit = true; if overflow!(significand * 10 + digit, u64::max_value()) { - // The next multiply/add would overflow, so just ignore all - // further digits. - while let b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - } - break; + return self.parse_decimal_overflow(positive, significand, exponent); } + self.eat_char(); significand = significand * 10 + digit; exponent -= 1; } - if !at_least_one_digit { + // Error if there is not at least one digit after the decimal point. + if exponent == 0 { match tri!(self.peek()) { Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)), None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)), @@ -557,7 +518,8 @@ let digit = (c - b'0') as i32; if overflow!(exp * 10 + digit, i32::max_value()) { - return self.parse_exponent_overflow(positive, significand, positive_exp); + let zero_significand = significand == 0; + return self.parse_exponent_overflow(positive, zero_significand, positive_exp); } exp = exp * 10 + digit; @@ -572,6 +534,238 @@ self.f64_from_parts(positive, significand, final_exp) } + #[cfg(feature = "float_roundtrip")] + fn f64_from_parts(&mut self, positive: bool, significand: u64, exponent: i32) -> Result { + let f = if self.single_precision { + lexical::parse_concise_float::(significand, exponent) as f64 + } else { + lexical::parse_concise_float::(significand, exponent) + }; + + if f.is_infinite() { + Err(self.error(ErrorCode::NumberOutOfRange)) + } else { + Ok(if positive { f } else { -f }) + } + } + + #[cfg(not(feature = "float_roundtrip"))] + fn f64_from_parts( + &mut self, + positive: bool, + significand: u64, + mut exponent: i32, + ) -> Result { + let mut f = significand as f64; + loop { + match POW10.get(exponent.wrapping_abs() as usize) { + Some(&pow) => { + if exponent >= 0 { + f *= pow; + if f.is_infinite() { + return Err(self.error(ErrorCode::NumberOutOfRange)); + } + } else { + f /= pow; + } + break; + } + None => { + if f == 0.0 { + break; + } + if exponent >= 0 { + return Err(self.error(ErrorCode::NumberOutOfRange)); + } + f /= 1e308; + exponent += 308; + } + } + } + Ok(if positive { f } else { -f }) + } + + #[cfg(feature = "float_roundtrip")] + #[cold] + #[inline(never)] + fn parse_long_integer(&mut self, positive: bool, partial_significand: u64) -> Result { + // To deserialize floats we'll first push the integer and fraction + // parts, both as byte strings, into the scratch buffer and then feed + // both slices to lexical's parser. For example if the input is + // `12.34e5` we'll push b"1234" into scratch and then pass b"12" and + // b"34" to lexical. `integer_end` will be used to track where to split + // the scratch buffer. + // + // Note that lexical expects the integer part to contain *no* leading + // zeroes and the fraction part to contain *no* trailing zeroes. The + // first requirement is already handled by the integer parsing logic. + // The second requirement will be enforced just before passing the + // slices to lexical in f64_long_from_parts. + self.scratch.clear(); + self.scratch + .extend_from_slice(itoa::Buffer::new().format(partial_significand).as_bytes()); + + loop { + match tri!(self.peek_or_null()) { + c @ b'0'..=b'9' => { + self.scratch.push(c); + self.eat_char(); + } + b'.' 
=> { + self.eat_char(); + return self.parse_long_decimal(positive, self.scratch.len()); + } + b'e' | b'E' => { + return self.parse_long_exponent(positive, self.scratch.len()); + } + _ => { + return self.f64_long_from_parts(positive, self.scratch.len(), 0); + } + } + } + } + + #[cfg(not(feature = "float_roundtrip"))] + #[cold] + #[inline(never)] + fn parse_long_integer(&mut self, positive: bool, significand: u64) -> Result { + let mut exponent = 0; + loop { + match tri!(self.peek_or_null()) { + b'0'..=b'9' => { + self.eat_char(); + // This could overflow... if your integer is gigabytes long. + // Ignore that possibility. + exponent += 1; + } + b'.' => { + return self.parse_decimal(positive, significand, exponent); + } + b'e' | b'E' => { + return self.parse_exponent(positive, significand, exponent); + } + _ => { + return self.f64_from_parts(positive, significand, exponent); + } + } + } + } + + #[cfg(feature = "float_roundtrip")] + #[cold] + fn parse_long_decimal(&mut self, positive: bool, integer_end: usize) -> Result { + let mut at_least_one_digit = integer_end < self.scratch.len(); + while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { + self.scratch.push(c); + self.eat_char(); + at_least_one_digit = true; + } + + if !at_least_one_digit { + match tri!(self.peek()) { + Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)), + None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)), + } + } + + match tri!(self.peek_or_null()) { + b'e' | b'E' => self.parse_long_exponent(positive, integer_end), + _ => self.f64_long_from_parts(positive, integer_end, 0), + } + } + + #[cfg(feature = "float_roundtrip")] + fn parse_long_exponent(&mut self, positive: bool, integer_end: usize) -> Result { + self.eat_char(); + + let positive_exp = match tri!(self.peek_or_null()) { + b'+' => { + self.eat_char(); + true + } + b'-' => { + self.eat_char(); + false + } + _ => true, + }; + + let next = match tri!(self.next_char()) { + Some(b) => b, + None => { + return Err(self.error(ErrorCode::EofWhileParsingValue)); + } + }; + + // Make sure a digit follows the exponent place. + let mut exp = match next { + c @ b'0'..=b'9' => (c - b'0') as i32, + _ => { + return Err(self.error(ErrorCode::InvalidNumber)); + } + }; + + while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { + self.eat_char(); + let digit = (c - b'0') as i32; + + if overflow!(exp * 10 + digit, i32::max_value()) { + let zero_significand = self.scratch.iter().all(|&digit| digit == b'0'); + return self.parse_exponent_overflow(positive, zero_significand, positive_exp); + } + + exp = exp * 10 + digit; + } + + let final_exp = if positive_exp { exp } else { -exp }; + + self.f64_long_from_parts(positive, integer_end, final_exp) + } + + // This cold code should not be inlined into the middle of the hot + // decimal-parsing loop above. 
+ #[cfg(feature = "float_roundtrip")] + #[cold] + #[inline(never)] + fn parse_decimal_overflow( + &mut self, + positive: bool, + significand: u64, + exponent: i32, + ) -> Result { + let mut buffer = itoa::Buffer::new(); + let significand = buffer.format(significand); + let fraction_digits = -exponent as usize; + self.scratch.clear(); + if let Some(zeros) = fraction_digits.checked_sub(significand.len() + 1) { + self.scratch.extend(iter::repeat(b'0').take(zeros + 1)); + } + self.scratch.extend_from_slice(significand.as_bytes()); + let integer_end = self.scratch.len() - fraction_digits; + self.parse_long_decimal(positive, integer_end) + } + + #[cfg(not(feature = "float_roundtrip"))] + #[cold] + #[inline(never)] + fn parse_decimal_overflow( + &mut self, + positive: bool, + significand: u64, + exponent: i32, + ) -> Result { + // The next multiply/add would overflow, so just ignore all further + // digits. + while let b'0'..=b'9' = tri!(self.peek_or_null()) { + self.eat_char(); + } + + match tri!(self.peek_or_null()) { + b'e' | b'E' => self.parse_exponent(positive, significand, exponent), + _ => self.f64_from_parts(positive, significand, exponent), + } + } + // This cold code should not be inlined into the middle of the hot // exponent-parsing loop above. #[cold] @@ -579,11 +773,11 @@ fn parse_exponent_overflow( &mut self, positive: bool, - significand: u64, + zero_significand: bool, positive_exp: bool, ) -> Result { // Error instead of +/- infinity. - if significand != 0 && positive_exp { + if !zero_significand && positive_exp { return Err(self.error(ErrorCode::NumberOutOfRange)); } @@ -593,6 +787,29 @@ Ok(if positive { 0.0 } else { -0.0 }) } + #[cfg(feature = "float_roundtrip")] + fn f64_long_from_parts( + &mut self, + positive: bool, + integer_end: usize, + exponent: i32, + ) -> Result { + let integer = &self.scratch[..integer_end]; + let fraction = &self.scratch[integer_end..]; + + let f = if self.single_precision { + lexical::parse_truncated_float::(integer, fraction, exponent) as f64 + } else { + lexical::parse_truncated_float::(integer, fraction, exponent) + }; + + if f.is_infinite() { + Err(self.error(ErrorCode::NumberOutOfRange)) + } else { + Ok(if positive { f } else { -f }) + } + } + fn parse_any_signed_number(&mut self) -> Result { let peek = match tri!(self.peek()) { Some(b) => b, @@ -743,41 +960,6 @@ Ok(()) } - fn f64_from_parts( - &mut self, - positive: bool, - significand: u64, - mut exponent: i32, - ) -> Result { - let mut f = significand as f64; - loop { - match POW10.get(exponent.wrapping_abs() as usize) { - Some(&pow) => { - if exponent >= 0 { - f *= pow; - if f.is_infinite() { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - } else { - f /= pow; - } - break; - } - None => { - if f == 0.0 { - break; - } - if exponent >= 0 { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - f /= 1e308; - exponent += 308; - } - } - } - Ok(if positive { f } else { -f }) - } - fn parse_object_colon(&mut self) -> Result<()> { match tri!(self.parse_whitespace()) { Some(b':') => { @@ -1023,8 +1205,7 @@ } } -// Clippy bug: https://github.com/rust-lang/rust-clippy/issues/5201 -#[allow(clippy::excessive_precision)] +#[cfg(not(feature = "float_roundtrip"))] static POW10: [f64; 309] = [ 1e000, 1e001, 1e002, 1e003, 1e004, 1e005, 1e006, 1e007, 1e008, 1e009, // 1e010, 1e011, 1e012, 1e013, 1e014, 1e015, 1e016, 1e017, 1e018, 1e019, // @@ -1059,13 +1240,13 @@ 1e300, 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308, ]; -macro_rules! deserialize_prim_number { +macro_rules! 
deserialize_number { ($method:ident) => { fn $method(self, visitor: V) -> Result where V: de::Visitor<'de>, { - self.deserialize_prim_number(visitor) + self.deserialize_number(visitor) } }; } @@ -1214,16 +1395,28 @@ } } - deserialize_prim_number!(deserialize_i8); - deserialize_prim_number!(deserialize_i16); - deserialize_prim_number!(deserialize_i32); - deserialize_prim_number!(deserialize_i64); - deserialize_prim_number!(deserialize_u8); - deserialize_prim_number!(deserialize_u16); - deserialize_prim_number!(deserialize_u32); - deserialize_prim_number!(deserialize_u64); - deserialize_prim_number!(deserialize_f32); - deserialize_prim_number!(deserialize_f64); + deserialize_number!(deserialize_i8); + deserialize_number!(deserialize_i16); + deserialize_number!(deserialize_i32); + deserialize_number!(deserialize_i64); + deserialize_number!(deserialize_u8); + deserialize_number!(deserialize_u16); + deserialize_number!(deserialize_u32); + deserialize_number!(deserialize_u64); + #[cfg(not(feature = "float_roundtrip"))] + deserialize_number!(deserialize_f32); + deserialize_number!(deserialize_f64); + + #[cfg(feature = "float_roundtrip")] + fn deserialize_f32(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + self.single_precision = true; + let val = self.deserialize_number(visitor); + self.single_precision = false; + val + } serde_if_integer128! { fn deserialize_i128(self, visitor: V) -> Result @@ -1695,10 +1888,7 @@ impl<'a, R: 'a> SeqAccess<'a, R> { fn new(de: &'a mut Deserializer) -> Self { - SeqAccess { - de: de, - first: true, - } + SeqAccess { de, first: true } } } @@ -1745,10 +1935,7 @@ impl<'a, R: 'a> MapAccess<'a, R> { fn new(de: &'a mut Deserializer) -> Self { - MapAccess { - de: de, - first: true, - } + MapAccess { de, first: true } } } @@ -1804,7 +1991,7 @@ impl<'a, R: 'a> VariantAccess<'a, R> { fn new(de: &'a mut Deserializer) -> Self { - VariantAccess { de: de } + VariantAccess { de } } } @@ -1857,7 +2044,7 @@ impl<'a, R: 'a> UnitVariantAccess<'a, R> { fn new(de: &'a mut Deserializer) -> Self { - UnitVariantAccess { de: de } + UnitVariantAccess { de } } } @@ -2069,7 +2256,7 @@ let offset = read.byte_offset(); StreamDeserializer { de: Deserializer::new(read), - offset: offset, + offset, failed: false, output: PhantomData, lifetime: PhantomData, diff -Nru cargo-0.44.1/vendor/serde_json/src/error.rs cargo-0.47.0/vendor/serde_json/src/error.rs --- cargo-0.44.1/vendor/serde_json/src/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -245,11 +245,7 @@ #[cold] pub(crate) fn syntax(code: ErrorCode, line: usize, column: usize) -> Self { Error { - err: Box::new(ErrorImpl { - code: code, - line: line, - column: column, - }), + err: Box::new(ErrorImpl { code, line, column }), } } @@ -388,8 +384,8 @@ Error { err: Box::new(ErrorImpl { code: ErrorCode::Message(msg.into_boxed_str()), - line: line, - column: column, + line, + column, }), } } diff -Nru cargo-0.44.1/vendor/serde_json/src/iter.rs cargo-0.47.0/vendor/serde_json/src/iter.rs --- cargo-0.44.1/vendor/serde_json/src/iter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/iter.rs 2020-10-01 21:38:28.000000000 +0000 @@ -25,7 +25,7 @@ { pub fn new(iter: I) -> LineColIterator { LineColIterator { - iter: iter, + iter, line: 1, col: 0, start_of_line: 0, diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/algorithm.rs cargo-0.47.0/vendor/serde_json/src/lexical/algorithm.rs --- 
cargo-0.44.1/vendor/serde_json/src/lexical/algorithm.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/algorithm.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,193 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Algorithms to efficiently convert strings to floats. + +use super::bhcomp::*; +use super::cached::*; +use super::errors::*; +use super::float::ExtendedFloat; +use super::num::*; +use super::small_powers::*; + +// FAST +// ---- + +/// Convert mantissa to exact value for a non-base2 power. +/// +/// Returns the resulting float and if the value can be represented exactly. +pub(crate) fn fast_path(mantissa: u64, exponent: i32) -> Option +where + F: Float, +{ + // `mantissa >> (F::MANTISSA_SIZE+1) != 0` effectively checks if the + // value has a no bits above the hidden bit, which is what we want. + let (min_exp, max_exp) = F::exponent_limit(); + let shift_exp = F::mantissa_limit(); + let mantissa_size = F::MANTISSA_SIZE + 1; + if mantissa == 0 { + Some(F::ZERO) + } else if mantissa >> mantissa_size != 0 { + // Would require truncation of the mantissa. + None + } else if exponent == 0 { + // 0 exponent, same as value, exact representation. + let float = F::as_cast(mantissa); + Some(float) + } else if exponent >= min_exp && exponent <= max_exp { + // Value can be exactly represented, return the value. + // Do not use powi, since powi can incrementally introduce + // error. + let float = F::as_cast(mantissa); + Some(float.pow10(exponent)) + } else if exponent >= 0 && exponent <= max_exp + shift_exp { + // Check to see if we have a disguised fast-path, where the + // number of digits in the mantissa is very small, but and + // so digits can be shifted from the exponent to the mantissa. + // https://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ + let small_powers = POW10_64; + let shift = exponent - max_exp; + let power = small_powers[shift as usize]; + + // Compute the product of the power, if it overflows, + // prematurely return early, otherwise, if we didn't overshoot, + // we can get an exact value. + let value = mantissa.checked_mul(power)?; + if value >> mantissa_size != 0 { + None + } else { + // Use powi, since it's correct, and faster on + // the fast-path. + let float = F::as_cast(value); + Some(float.pow10(max_exp)) + } + } else { + // Cannot be exactly represented, exponent too small or too big, + // would require truncation. + None + } +} + +// MODERATE +// -------- + +/// Multiply the floating-point by the exponent. +/// +/// Multiply by pre-calculated powers of the base, modify the extended- +/// float, and return if new value and if the value can be represented +/// accurately. +fn multiply_exponent_extended(fp: &mut ExtendedFloat, exponent: i32, truncated: bool) -> bool +where + F: Float, +{ + let powers = ExtendedFloat::get_powers(); + let exponent = exponent.saturating_add(powers.bias); + let small_index = exponent % powers.step; + let large_index = exponent / powers.step; + if exponent < 0 { + // Guaranteed underflow (assign 0). + fp.mant = 0; + true + } else if large_index as usize >= powers.large.len() { + // Overflow (assign infinity) + fp.mant = 1 << 63; + fp.exp = 0x7FF; + true + } else { + // Within the valid exponent range, multiply by the large and small + // exponents and return the resulting value. + + // Track errors to as a factor of unit in last-precision. 
+ let mut errors: u32 = 0; + if truncated { + errors += u64::error_halfscale(); + } + + // Multiply by the small power. + // Check if we can directly multiply by an integer, if not, + // use extended-precision multiplication. + match fp + .mant + .overflowing_mul(powers.get_small_int(small_index as usize)) + { + // Overflow, multiplication unsuccessful, go slow path. + (_, true) => { + fp.normalize(); + fp.imul(&powers.get_small(small_index as usize)); + errors += u64::error_halfscale(); + } + // No overflow, multiplication successful. + (mant, false) => { + fp.mant = mant; + fp.normalize(); + } + } + + // Multiply by the large power + fp.imul(&powers.get_large(large_index as usize)); + if errors > 0 { + errors += 1; + } + errors += u64::error_halfscale(); + + // Normalize the floating point (and the errors). + let shift = fp.normalize(); + errors <<= shift; + + u64::error_is_accurate::(errors, &fp) + } +} + +/// Create a precise native float using an intermediate extended-precision float. +/// +/// Return the float approximation and if the value can be accurately +/// represented with mantissa bits of precision. +#[inline] +pub(crate) fn moderate_path( + mantissa: u64, + exponent: i32, + truncated: bool, +) -> (ExtendedFloat, bool) +where + F: Float, +{ + let mut fp = ExtendedFloat { + mant: mantissa, + exp: 0, + }; + let valid = multiply_exponent_extended::(&mut fp, exponent, truncated); + (fp, valid) +} + +// FALLBACK +// -------- + +/// Fallback path when the fast path does not work. +/// +/// Uses the moderate path, if applicable, otherwise, uses the slow path +/// as required. +pub(crate) fn fallback_path( + integer: &[u8], + fraction: &[u8], + mantissa: u64, + exponent: i32, + mantissa_exponent: i32, + truncated: bool, +) -> F +where + F: Float, +{ + // Moderate path (use an extended 80-bit representation). + let (fp, valid) = moderate_path::(mantissa, mantissa_exponent, truncated); + if valid { + return fp.into_float::(); + } + + // Slow path, fast path didn't work. + let b = fp.into_downward_float::(); + if b.is_special() { + // We have a non-finite number, we get to leave early. + b + } else { + bhcomp(b, integer, fraction, exponent) + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/bhcomp.rs cargo-0.47.0/vendor/serde_json/src/lexical/bhcomp.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/bhcomp.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/bhcomp.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,218 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Compare the mantissa to the halfway representation of the float. +//! +//! Compares the actual significant digits of the mantissa to the +//! theoretical digits from `b+h`, scaled into the proper range. + +use super::bignum::*; +use super::digit::*; +use super::exponent::*; +use super::float::*; +use super::math::*; +use super::num::*; +use super::rounding::*; +use crate::lib::{cmp, mem}; + +// MANTISSA + +/// Parse the full mantissa into a big integer. +/// +/// Max digits is the maximum number of digits plus one. +fn parse_mantissa(integer: &[u8], fraction: &[u8]) -> Bigint +where + F: Float, +{ + // Main loop + let small_powers = POW10_LIMB; + let step = small_powers.len() - 2; + let max_digits = F::MAX_DIGITS - 1; + let mut counter = 0; + let mut value: Limb = 0; + let mut i: usize = 0; + let mut result = Bigint::default(); + + // Iteratively process all the data in the mantissa. 
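// Digits are accumulated into a native `Limb` (`value`) and folded into the
// big integer only every `step` digits via `imul_small`/`iadd_small`, so
// there is roughly one bignum operation per `step` digits rather than one
// per digit. The loop also stops after `max_digits` significant digits; if
// any digits are left over (guaranteed to include a non-zero one, since
// trailing zeros were trimmed), the `imul_small(10)`/`iadd_small(1)` below
// appends a single sticky 1 so later rounding and the `b+h` comparison can
// tell that the value was truncated.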
+ for &digit in integer.iter().chain(fraction) { + // We've parsed the max digits using small values, add to bignum + if counter == step { + result.imul_small(small_powers[counter]); + result.iadd_small(value); + counter = 0; + value = 0; + } + + value *= 10; + value += as_limb(to_digit(digit).unwrap()); + + i += 1; + counter += 1; + if i == max_digits { + break; + } + } + + // We will always have a remainder, as long as we entered the loop + // once, or counter % step is 0. + if counter != 0 { + result.imul_small(small_powers[counter]); + result.iadd_small(value); + } + + // If we have any remaining digits after the last value, we need + // to add a 1 after the rest of the array, it doesn't matter where, + // just move it up. This is good for the worst-possible float + // representation. We also need to return an index. + // Since we already trimmed trailing zeros, we know there has + // to be a non-zero digit if there are any left. + if i < integer.len() + fraction.len() { + result.imul_small(10); + result.iadd_small(1); + } + + result +} + +// FLOAT OPS + +/// Calculate `b` from a a representation of `b` as a float. +#[inline] +pub(super) fn b_extended(f: F) -> ExtendedFloat { + ExtendedFloat::from_float(f) +} + +/// Calculate `b+h` from a a representation of `b` as a float. +#[inline] +pub(super) fn bh_extended(f: F) -> ExtendedFloat { + // None of these can overflow. + let b = b_extended(f); + ExtendedFloat { + mant: (b.mant << 1) + 1, + exp: b.exp - 1, + } +} + +// ROUNDING + +/// Custom round-nearest, tie-event algorithm for bhcomp. +#[inline] +fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, is_truncated: bool) { + let (mut is_above, mut is_halfway) = round_nearest(fp, shift); + if is_halfway && is_truncated { + is_above = true; + is_halfway = false; + } + tie_even(fp, is_above, is_halfway); +} + +// BHCOMP + +/// Calculate the mantissa for a big integer with a positive exponent. +fn large_atof(mantissa: Bigint, exponent: i32) -> F +where + F: Float, +{ + let bits = mem::size_of::() * 8; + + // Simple, we just need to multiply by the power of the radix. + // Now, we can calculate the mantissa and the exponent from this. + // The binary exponent is the binary exponent for the mantissa + // shifted to the hidden bit. + let mut bigmant = mantissa; + bigmant.imul_pow10(exponent as u32); + + // Get the exact representation of the float from the big integer. + let (mant, is_truncated) = bigmant.hi64(); + let exp = bigmant.bit_length() as i32 - bits as i32; + let mut fp = ExtendedFloat { mant, exp }; + fp.round_to_native::(|fp, shift| round_nearest_tie_even(fp, shift, is_truncated)); + into_float(fp) +} + +/// Calculate the mantissa for a big integer with a negative exponent. +/// +/// This invokes the comparison with `b+h`. +fn small_atof(mantissa: Bigint, exponent: i32, f: F) -> F +where + F: Float, +{ + // Get the significant digits and radix exponent for the real digits. + let mut real_digits = mantissa; + let real_exp = exponent; + debug_assert!(real_exp < 0); + + // Get the significant digits and the binary exponent for `b+h`. + let theor = bh_extended(f); + let mut theor_digits = Bigint::from_u64(theor.mant); + let theor_exp = theor.exp; + + // We need to scale the real digits and `b+h` digits to be the same + // order. We currently have `real_exp`, in `radix`, that needs to be + // shifted to `theor_digits` (since it is negative), and `theor_exp` + // to either `theor_digits` or `real_digits` as a power of 2 (since it + // may be positive or negative). 
Try to remove as many powers of 2 + // as possible. All values are relative to `theor_digits`, that is, + // reflect the power you need to multiply `theor_digits` by. + + // Can remove a power-of-two, since the radix is 10. + // Both are on opposite-sides of equation, can factor out a + // power of two. + // + // Example: 10^-10, 2^-10 -> ( 0, 10, 0) + // Example: 10^-10, 2^-15 -> (-5, 10, 0) + // Example: 10^-10, 2^-5 -> ( 5, 10, 0) + // Example: 10^-10, 2^5 -> (15, 10, 0) + let binary_exp = theor_exp - real_exp; + let halfradix_exp = -real_exp; + let radix_exp = 0; + + // Carry out our multiplication. + if halfradix_exp != 0 { + theor_digits.imul_pow5(halfradix_exp as u32); + } + if radix_exp != 0 { + theor_digits.imul_pow10(radix_exp as u32); + } + if binary_exp > 0 { + theor_digits.imul_pow2(binary_exp as u32); + } else if binary_exp < 0 { + real_digits.imul_pow2(-binary_exp as u32); + } + + // Compare real digits to theoretical digits and round the float. + match real_digits.compare(&theor_digits) { + cmp::Ordering::Greater => f.next_positive(), + cmp::Ordering::Less => f, + cmp::Ordering::Equal => f.round_positive_even(), + } +} + +/// Calculate the exact value of the float. +/// +/// Note: fraction must not have trailing zeros. +pub(crate) fn bhcomp(b: F, integer: &[u8], mut fraction: &[u8], exponent: i32) -> F +where + F: Float, +{ + // Calculate the number of integer digits and use that to determine + // where the significant digits start in the fraction. + let integer_digits = integer.len(); + let fraction_digits = fraction.len(); + let digits_start = if integer_digits == 0 { + let start = fraction.iter().take_while(|&x| *x == b'0').count(); + fraction = &fraction[start..]; + start + } else { + 0 + }; + let sci_exp = scientific_exponent(exponent, integer_digits, digits_start); + let count = F::MAX_DIGITS.min(integer_digits + fraction_digits - digits_start); + let scaled_exponent = sci_exp + 1 - count as i32; + + let mantissa = parse_mantissa::(integer, fraction); + if scaled_exponent >= 0 { + large_atof(mantissa, scaled_exponent) + } else { + small_atof(mantissa, scaled_exponent, b) + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/bignum.rs cargo-0.47.0/vendor/serde_json/src/lexical/bignum.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/bignum.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/bignum.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,33 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Big integer type definition. + +use super::math::*; +use crate::lib::Vec; + +/// Storage for a big integer type. +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct Bigint { + /// Internal storage for the Bigint, in little-endian order. + pub(crate) data: Vec, +} + +impl Default for Bigint { + fn default() -> Self { + Bigint { + data: Vec::with_capacity(20), + } + } +} + +impl Math for Bigint { + #[inline] + fn data(&self) -> &Vec { + &self.data + } + + #[inline] + fn data_mut(&mut self) -> &mut Vec { + &mut self.data + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/cached_float80.rs cargo-0.47.0/vendor/serde_json/src/lexical/cached_float80.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/cached_float80.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/cached_float80.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,206 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Cached exponents for basen values with 80-bit extended floats. +//! +//! 
Exact versions of base**n as an extended-precision float, with both +//! large and small powers. Use the large powers to minimize the amount +//! of compounded error. +//! +//! These values were calculated using Python, using the arbitrary-precision +//! integer to calculate exact extended-representation of each value. +//! These values are all normalized. + +use super::cached::{ExtendedFloatArray, ModeratePathPowers}; + +// LOW-LEVEL +// --------- + +// BASE10 + +const BASE10_SMALL_MANTISSA: [u64; 10] = [ + 9223372036854775808, // 10^0 + 11529215046068469760, // 10^1 + 14411518807585587200, // 10^2 + 18014398509481984000, // 10^3 + 11258999068426240000, // 10^4 + 14073748835532800000, // 10^5 + 17592186044416000000, // 10^6 + 10995116277760000000, // 10^7 + 13743895347200000000, // 10^8 + 17179869184000000000, // 10^9 +]; +const BASE10_SMALL_EXPONENT: [i32; 10] = [ + -63, // 10^0 + -60, // 10^1 + -57, // 10^2 + -54, // 10^3 + -50, // 10^4 + -47, // 10^5 + -44, // 10^6 + -40, // 10^7 + -37, // 10^8 + -34, // 10^9 +]; +const BASE10_LARGE_MANTISSA: [u64; 66] = [ + 11555125961253852697, // 10^-350 + 13451937075301367670, // 10^-340 + 15660115838168849784, // 10^-330 + 18230774251475056848, // 10^-320 + 10611707258198326947, // 10^-310 + 12353653155963782858, // 10^-300 + 14381545078898527261, // 10^-290 + 16742321987285426889, // 10^-280 + 9745314011399999080, // 10^-270 + 11345038669416679861, // 10^-260 + 13207363278391631158, // 10^-250 + 15375394465392026070, // 10^-240 + 17899314949046850752, // 10^-230 + 10418772551374772303, // 10^-220 + 12129047596099288555, // 10^-210 + 14120069793541087484, // 10^-200 + 16437924692338667210, // 10^-190 + 9568131466127621947, // 10^-180 + 11138771039116687545, // 10^-170 + 12967236152753102995, // 10^-160 + 15095849699286165408, // 10^-150 + 17573882009934360870, // 10^-140 + 10229345649675443343, // 10^-130 + 11908525658859223294, // 10^-120 + 13863348470604074297, // 10^-110 + 16139061738043178685, // 10^-100 + 9394170331095332911, // 10^-90 + 10936253623915059621, // 10^-80 + 12731474852090538039, // 10^-70 + 14821387422376473014, // 10^-60 + 17254365866976409468, // 10^-50 + 10043362776618689222, // 10^-40 + 11692013098647223345, // 10^-30 + 13611294676837538538, // 10^-20 + 15845632502852867518, // 10^-10 + 9223372036854775808, // 10^0 + 10737418240000000000, // 10^10 + 12500000000000000000, // 10^20 + 14551915228366851806, // 10^30 + 16940658945086006781, // 10^40 + 9860761315262647567, // 10^50 + 11479437019748901445, // 10^60 + 13363823550460978230, // 10^70 + 15557538194652854267, // 10^80 + 18111358157653424735, // 10^90 + 10542197943230523224, // 10^100 + 12272733663244316382, // 10^110 + 14287342391028437277, // 10^120 + 16632655625031838749, // 10^130 + 9681479787123295682, // 10^140 + 11270725851789228247, // 10^150 + 13120851772591970218, // 10^160 + 15274681817498023410, // 10^170 + 17782069995880619867, // 10^180 + 10350527006597618960, // 10^190 + 12049599325514420588, // 10^200 + 14027579833653779454, // 10^210 + 16330252207878254650, // 10^220 + 9505457831475799117, // 10^230 + 11065809325636130661, // 10^240 + 12882297539194266616, // 10^250 + 14996968138956309548, // 10^260 + 17458768723248864463, // 10^270 + 10162340898095201970, // 10^280 + 11830521861667747109, // 10^290 + 13772540099066387756, // 10^300 +]; +const BASE10_LARGE_EXPONENT: [i32; 66] = [ + -1226, // 10^-350 + -1193, // 10^-340 + -1160, // 10^-330 + -1127, // 10^-320 + -1093, // 10^-310 + -1060, // 10^-300 + -1027, // 10^-290 + -994, // 10^-280 + -960, // 10^-270 
+ -927, // 10^-260 + -894, // 10^-250 + -861, // 10^-240 + -828, // 10^-230 + -794, // 10^-220 + -761, // 10^-210 + -728, // 10^-200 + -695, // 10^-190 + -661, // 10^-180 + -628, // 10^-170 + -595, // 10^-160 + -562, // 10^-150 + -529, // 10^-140 + -495, // 10^-130 + -462, // 10^-120 + -429, // 10^-110 + -396, // 10^-100 + -362, // 10^-90 + -329, // 10^-80 + -296, // 10^-70 + -263, // 10^-60 + -230, // 10^-50 + -196, // 10^-40 + -163, // 10^-30 + -130, // 10^-20 + -97, // 10^-10 + -63, // 10^0 + -30, // 10^10 + 3, // 10^20 + 36, // 10^30 + 69, // 10^40 + 103, // 10^50 + 136, // 10^60 + 169, // 10^70 + 202, // 10^80 + 235, // 10^90 + 269, // 10^100 + 302, // 10^110 + 335, // 10^120 + 368, // 10^130 + 402, // 10^140 + 435, // 10^150 + 468, // 10^160 + 501, // 10^170 + 534, // 10^180 + 568, // 10^190 + 601, // 10^200 + 634, // 10^210 + 667, // 10^220 + 701, // 10^230 + 734, // 10^240 + 767, // 10^250 + 800, // 10^260 + 833, // 10^270 + 867, // 10^280 + 900, // 10^290 + 933, // 10^300 +]; +const BASE10_SMALL_INT_POWERS: [u64; 10] = [ + 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, +]; +const BASE10_STEP: i32 = 10; +const BASE10_BIAS: i32 = 350; + +// HIGH LEVEL +// ---------- + +const BASE10_POWERS: ModeratePathPowers = ModeratePathPowers { + small: ExtendedFloatArray { + mant: &BASE10_SMALL_MANTISSA, + exp: &BASE10_SMALL_EXPONENT, + }, + large: ExtendedFloatArray { + mant: &BASE10_LARGE_MANTISSA, + exp: &BASE10_LARGE_EXPONENT, + }, + small_int: &BASE10_SMALL_INT_POWERS, + step: BASE10_STEP, + bias: BASE10_BIAS, +}; + +/// Get powers from base. +pub(crate) fn get_powers() -> &'static ModeratePathPowers { + &BASE10_POWERS +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/cached.rs cargo-0.47.0/vendor/serde_json/src/lexical/cached.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/cached.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/cached.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,82 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Cached powers trait for extended-precision floats. + +use super::cached_float80; +use super::float::ExtendedFloat; + +// POWERS + +/// Precalculated powers that uses two-separate arrays for memory-efficiency. +#[doc(hidden)] +pub(crate) struct ExtendedFloatArray { + // Pre-calculated mantissa for the powers. + pub mant: &'static [u64], + // Pre-calculated binary exponents for the powers. + pub exp: &'static [i32], +} + +/// Allow indexing of values without bounds checking +impl ExtendedFloatArray { + #[inline] + pub fn get_extended_float(&self, index: usize) -> ExtendedFloat { + let mant = self.mant[index]; + let exp = self.exp[index]; + ExtendedFloat { mant, exp } + } + + #[inline] + pub fn len(&self) -> usize { + self.mant.len() + } +} + +// MODERATE PATH POWERS + +/// Precalculated powers of base N for the moderate path. +#[doc(hidden)] +pub(crate) struct ModeratePathPowers { + // Pre-calculated small powers. + pub small: ExtendedFloatArray, + // Pre-calculated large powers. + pub large: ExtendedFloatArray, + /// Pre-calculated small powers as 64-bit integers + pub small_int: &'static [u64], + // Step between large powers and number of small powers. + pub step: i32, + // Exponent bias for the large powers. 
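// How these tables are combined: in `multiply_exponent_extended` a decimal
// exponent `e` is first biased (`e + bias`), then split into
// `large = e' / step` and `small = e' % step`, so one entry from `large`
// (spaced `step` exponents apart) and one entry from `small`
// (exponents `0..step`) together cover every exponent in range.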
+ pub bias: i32, +} + +/// Allow indexing of values without bounds checking +impl ModeratePathPowers { + #[inline] + pub fn get_small(&self, index: usize) -> ExtendedFloat { + self.small.get_extended_float(index) + } + + #[inline] + pub fn get_large(&self, index: usize) -> ExtendedFloat { + self.large.get_extended_float(index) + } + + #[inline] + pub fn get_small_int(&self, index: usize) -> u64 { + self.small_int[index] + } +} + +// CACHED EXTENDED POWERS + +/// Cached powers as a trait for a floating-point type. +pub(crate) trait ModeratePathCache { + /// Get cached powers. + fn get_powers() -> &'static ModeratePathPowers; +} + +impl ModeratePathCache for ExtendedFloat { + #[inline] + fn get_powers() -> &'static ModeratePathPowers { + cached_float80::get_powers() + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/digit.rs cargo-0.47.0/vendor/serde_json/src/lexical/digit.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/digit.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/digit.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,15 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Helpers to convert and add digits from characters. + +// Convert u8 to digit. +#[inline] +pub(crate) fn to_digit(c: u8) -> Option { + (c as char).to_digit(10) +} + +// Add digit to mantissa. +#[inline] +pub(crate) fn add_digit(value: u64, digit: u32) -> Option { + value.checked_mul(10)?.checked_add(digit as u64) +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/errors.rs cargo-0.47.0/vendor/serde_json/src/lexical/errors.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/errors.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/errors.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,133 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Estimate the error in an 80-bit approximation of a float. +//! +//! This estimates the error in a floating-point representation. +//! +//! This implementation is loosely based off the Golang implementation, +//! found here: +//! https://golang.org/src/strconv/atof.go + +use super::float::*; +use super::num::*; +use super::rounding::*; + +pub(crate) trait FloatErrors { + /// Get the full error scale. + fn error_scale() -> u32; + /// Get the half error scale. + fn error_halfscale() -> u32; + /// Determine if the number of errors is tolerable for float precision. + fn error_is_accurate(count: u32, fp: &ExtendedFloat) -> bool; +} + +/// Check if the error is accurate with a round-nearest rounding scheme. +#[inline] +fn nearest_error_is_accurate(errors: u64, fp: &ExtendedFloat, extrabits: u64) -> bool { + // Round-to-nearest, need to use the halfway point. + if extrabits == 65 { + // Underflow, we have a shift larger than the mantissa. + // Representation is valid **only** if the value is close enough + // overflow to the next bit within errors. If it overflows, + // the representation is **not** valid. + !fp.mant.overflowing_add(errors).1 + } else { + let mask: u64 = lower_n_mask(extrabits); + let extra: u64 = fp.mant & mask; + + // Round-to-nearest, need to check if we're close to halfway. + // IE, b10100 | 100000, where `|` signifies the truncation point. + let halfway: u64 = lower_n_halfway(extrabits); + let cmp1 = halfway.wrapping_sub(errors) < extra; + let cmp2 = extra < halfway.wrapping_add(errors); + + // If both comparisons are true, we have significant rounding error, + // and the value cannot be exactly represented. 
Otherwise, the + // representation is valid. + !(cmp1 && cmp2) + } +} + +impl FloatErrors for u64 { + #[inline] + fn error_scale() -> u32 { + 8 + } + + #[inline] + fn error_halfscale() -> u32 { + u64::error_scale() / 2 + } + + #[inline] + fn error_is_accurate(count: u32, fp: &ExtendedFloat) -> bool { + // Determine if extended-precision float is a good approximation. + // If the error has affected too many units, the float will be + // inaccurate, or if the representation is too close to halfway + // that any operations could affect this halfway representation. + // See the documentation for dtoa for more information. + let bias = -(F::EXPONENT_BIAS - F::MANTISSA_SIZE); + let denormal_exp = bias - 63; + // This is always a valid u32, since (denormal_exp - fp.exp) + // will always be positive and the significand size is {23, 52}. + let extrabits = if fp.exp <= denormal_exp { + 64 - F::MANTISSA_SIZE + denormal_exp - fp.exp + } else { + 63 - F::MANTISSA_SIZE + }; + + // Our logic is as follows: we want to determine if the actual + // mantissa and the errors during calculation differ significantly + // from the rounding point. The rounding point for round-nearest + // is the halfway point, IE, this when the truncated bits start + // with b1000..., while the rounding point for the round-toward + // is when the truncated bits are equal to 0. + // To do so, we can check whether the rounding point +/- the error + // are >/< the actual lower n bits. + // + // For whether we need to use signed or unsigned types for this + // analysis, see this example, using u8 rather than u64 to simplify + // things. + // + // # Comparisons + // cmp1 = (halfway - errors) < extra + // cmp1 = extra < (halfway + errors) + // + // # Large Extrabits, Low Errors + // + // extrabits = 8 + // halfway = 0b10000000 + // extra = 0b10000010 + // errors = 0b00000100 + // halfway - errors = 0b01111100 + // halfway + errors = 0b10000100 + // + // Unsigned: + // halfway - errors = 124 + // halfway + errors = 132 + // extra = 130 + // cmp1 = true + // cmp2 = true + // Signed: + // halfway - errors = 124 + // halfway + errors = -124 + // extra = -126 + // cmp1 = false + // cmp2 = true + // + // # Conclusion + // + // Since errors will always be small, and since we want to detect + // if the representation is accurate, we need to use an **unsigned** + // type for comparisons. + + let extrabits = extrabits as u64; + let errors = count as u64; + if extrabits > 65 { + // Underflow, we have a literal 0. + return true; + } + + nearest_error_is_accurate(errors, fp, extrabits) + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/exponent.rs cargo-0.47.0/vendor/serde_json/src/lexical/exponent.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/exponent.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/exponent.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,50 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Utilities to calculate exponents. + +/// Convert usize into i32 without overflow. +/// +/// This is needed to ensure when adjusting the exponent relative to +/// the mantissa we do not overflow for comically-long exponents. +#[inline] +fn into_i32(value: usize) -> i32 { + if value > i32::max_value() as usize { + i32::max_value() + } else { + value as i32 + } +} + +// EXPONENT CALCULATION + +// Calculate the scientific notation exponent without overflow. +// +// For example, 0.1 would be -1, and 10 would be 1 in base 10. 
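// A minimal standalone sketch of the calculation below; `sci_exp_example`
// is a hypothetical name used only for illustration and is not part of
// this module. The usize -> i32 casts are assumed to be in range here,
// whereas the real implementation saturates them through `into_i32`.
fn sci_exp_example(exponent: i32, integer_digits: usize, fraction_start: usize) -> i32 {
    if integer_digits == 0 {
        // No integer digits: each leading zero of the fraction pushes the
        // scientific exponent down by one, plus one for the leading digit.
        exponent.saturating_sub(fraction_start as i32).saturating_sub(1)
    } else {
        // "dd.ff...": the exponent grows by the number of integer digits - 1.
        exponent.saturating_add(integer_digits as i32 - 1)
    }
}
// sci_exp_example(0, 0, 2) == -3   ("0.001"   == 1e-3)
// sci_exp_example(0, 2, 0) ==  1   ("10"      == 1e1)
// sci_exp_example(5, 2, 0) ==  6   ("12.34e5" == 1.234e6)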
+#[inline] +pub(crate) fn scientific_exponent( + exponent: i32, + integer_digits: usize, + fraction_start: usize, +) -> i32 { + if integer_digits == 0 { + let fraction_start = into_i32(fraction_start); + exponent.saturating_sub(fraction_start).saturating_sub(1) + } else { + let integer_shift = into_i32(integer_digits - 1); + exponent.saturating_add(integer_shift) + } +} + +// Calculate the mantissa exponent without overflow. +// +// Remove the number of digits that contributed to the mantissa past +// the dot, and add the number of truncated digits from the mantissa, +// to calculate the scaling factor for the mantissa from a raw exponent. +#[inline] +pub(crate) fn mantissa_exponent(exponent: i32, fraction_digits: usize, truncated: usize) -> i32 { + if fraction_digits > truncated { + exponent.saturating_sub(into_i32(fraction_digits - truncated)) + } else { + exponent.saturating_add(into_i32(truncated - fraction_digits)) + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/float.rs cargo-0.47.0/vendor/serde_json/src/lexical/float.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/float.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/float.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,183 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +// FLOAT TYPE + +use super::num::*; +use super::rounding::*; +use super::shift::*; + +/// Extended precision floating-point type. +/// +/// Private implementation, exposed only for testing purposes. +#[doc(hidden)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct ExtendedFloat { + /// Mantissa for the extended-precision float. + pub mant: u64, + /// Binary exponent for the extended-precision float. + pub exp: i32, +} + +impl ExtendedFloat { + // PROPERTIES + + // OPERATIONS + + /// Multiply two normalized extended-precision floats, as if by `a*b`. + /// + /// The precision is maximal when the numbers are normalized, however, + /// decent precision will occur as long as both values have high bits + /// set. The result is not normalized. + /// + /// Algorithm: + /// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input). + /// 2. Normalization of the result (not done here). + /// 3. Addition of exponents. + pub(crate) fn mul(&self, b: &ExtendedFloat) -> ExtendedFloat { + // Logic check, values must be decently normalized prior to multiplication. + debug_assert!((self.mant & u64::HIMASK != 0) && (b.mant & u64::HIMASK != 0)); + + // Extract high-and-low masks. + let ah = self.mant >> u64::HALF; + let al = self.mant & u64::LOMASK; + let bh = b.mant >> u64::HALF; + let bl = b.mant & u64::LOMASK; + + // Get our products + let ah_bl = ah * bl; + let al_bh = al * bh; + let al_bl = al * bl; + let ah_bh = ah * bh; + + let mut tmp = (ah_bl & u64::LOMASK) + (al_bh & u64::LOMASK) + (al_bl >> u64::HALF); + // round up + tmp += 1 << (u64::HALF - 1); + + ExtendedFloat { + mant: ah_bh + (ah_bl >> u64::HALF) + (al_bh >> u64::HALF) + (tmp >> u64::HALF), + exp: self.exp + b.exp + u64::FULL, + } + } + + /// Multiply in-place, as if by `a*b`. + /// + /// The result is not normalized. + #[inline] + pub(crate) fn imul(&mut self, b: &ExtendedFloat) { + *self = self.mul(b); + } + + // NORMALIZE + + /// Normalize float-point number. + /// + /// Shift the mantissa so the number of leading zeros is 0, or the value + /// itself is 0. + /// + /// Get the number of bytes shifted. 
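// Note: the shift count returned by `normalize` below is in bits, not bytes.
// For example, an `ExtendedFloat { mant: 1, exp: 0 }` is normalized to
// `mant == 1 << 63` with a return value of 63, and `shl` lowers the binary
// exponent by that same amount.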
+ #[inline] + pub(crate) fn normalize(&mut self) -> u32 { + // Note: + // Using the cltz intrinsic via leading_zeros is way faster (~10x) + // than shifting 1-bit at a time, via while loop, and also way + // faster (~2x) than an unrolled loop that checks at 32, 16, 4, + // 2, and 1 bit. + // + // Using a modulus of pow2 (which will get optimized to a bitwise + // and with 0x3F or faster) is slightly slower than an if/then, + // however, removing the if/then will likely optimize more branched + // code as it removes conditional logic. + + // Calculate the number of leading zeros, and then zero-out + // any overflowing bits, to avoid shl overflow when self.mant == 0. + let shift = if self.mant == 0 { + 0 + } else { + self.mant.leading_zeros() + }; + shl(self, shift as i32); + shift + } + + // ROUND + + /// Lossy round float-point number to native mantissa boundaries. + #[inline] + pub(crate) fn round_to_native(&mut self, algorithm: Algorithm) + where + F: Float, + Algorithm: FnOnce(&mut ExtendedFloat, i32), + { + round_to_native::(self, algorithm) + } + + // FROM + + /// Create extended float from native float. + #[inline] + pub fn from_float(f: F) -> ExtendedFloat { + from_float(f) + } + + // INTO + + /// Convert into default-rounded, lower-precision native float. + #[inline] + pub(crate) fn into_float(mut self) -> F { + self.round_to_native::(round_nearest_tie_even); + into_float(self) + } + + /// Convert into downward-rounded, lower-precision native float. + #[inline] + pub(crate) fn into_downward_float(mut self) -> F { + self.round_to_native::(round_downward); + into_float(self) + } +} + +// FROM FLOAT + +// Import ExtendedFloat from native float. +#[inline] +pub(crate) fn from_float(f: F) -> ExtendedFloat +where + F: Float, +{ + ExtendedFloat { + mant: u64::as_cast(f.mantissa()), + exp: f.exponent(), + } +} + +// INTO FLOAT + +// Export extended-precision float to native float. +// +// The extended-precision float must be in native float representation, +// with overflow/underflow appropriately handled. +#[inline] +pub(crate) fn into_float(fp: ExtendedFloat) -> F +where + F: Float, +{ + // Export floating-point number. + if fp.mant == 0 || fp.exp < F::DENORMAL_EXPONENT { + // sub-denormal, underflow + F::ZERO + } else if fp.exp >= F::MAX_EXPONENT { + // overflow + F::from_bits(F::INFINITY_BITS) + } else { + // calculate the exp and fraction bits, and return a float from bits. + let exp: u64; + if (fp.exp == F::DENORMAL_EXPONENT) && (fp.mant & F::HIDDEN_BIT_MASK.as_u64()) == 0 { + exp = 0; + } else { + exp = (fp.exp + F::EXPONENT_BIAS) as u64; + } + let exp = exp << F::MANTISSA_SIZE; + let mant = fp.mant & F::MANTISSA_MASK.as_u64(); + F::from_bits(F::Unsigned::as_cast(mant | exp)) + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/large_powers32.rs cargo-0.47.0/vendor/serde_json/src/lexical/large_powers32.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/large_powers32.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/large_powers32.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,183 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Precalculated large powers for 32-bit limbs. + +/// Large powers (&[u32]) for base5 operations. 
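// The tables below hold 5 raised to successive powers of two
// (5^1, 5^2, 5^4, 5^8, ...) encoded as little-endian 32-bit limbs, i.e.
// value = limbs[0] + limbs[1] * 2^32 + ... A small sketch of that encoding
// (`to_limbs32` is a hypothetical helper, shown only for illustration):
fn to_limbs32(mut x: u64) -> Vec<u32> {
    let mut limbs = Vec::new();
    while x != 0 {
        limbs.push(x as u32); // low 32 bits first (little-endian)
        x >>= 32;
    }
    limbs
}
// 5^16 = 152587890625 encodes as [2264035265, 35], matching POW5_5 below.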
+const POW5_1: [u32; 1] = [5]; +const POW5_2: [u32; 1] = [25]; +const POW5_3: [u32; 1] = [625]; +const POW5_4: [u32; 1] = [390625]; +const POW5_5: [u32; 2] = [2264035265, 35]; +const POW5_6: [u32; 3] = [2242703233, 762134875, 1262]; +const POW5_7: [u32; 5] = [3211403009, 1849224548, 3668416493, 3913284084, 1593091]; +const POW5_8: [u32; 10] = [ + 781532673, 64985353, 253049085, 594863151, 3553621484, 3288652808, 3167596762, 2788392729, + 3911132675, 590, +]; +const POW5_9: [u32; 19] = [ + 2553183233, 3201533787, 3638140786, 303378311, 1809731782, 3477761648, 3583367183, 649228654, + 2915460784, 487929380, 1011012442, 1677677582, 3428152256, 1710878487, 1438394610, 2161952759, + 4100910556, 1608314830, 349175, +]; +const POW5_10: [u32; 38] = [ + 4234999809, 2012377703, 2408924892, 1570150255, 3090844311, 3273530073, 1187251475, 2498123591, + 3364452033, 1148564857, 687371067, 2854068671, 1883165473, 505794538, 2988060450, 3159489326, + 2531348317, 3215191468, 849106862, 3892080979, 3288073877, 2242451748, 4183778142, 2995818208, + 2477501924, 325481258, 2487842652, 1774082830, 1933815724, 2962865281, 1168579910, 2724829000, + 2360374019, 2315984659, 2360052375, 3251779801, 1664357844, 28, +]; +const POW5_11: [u32; 75] = [ + 689565697, 4116392818, 1853628763, 516071302, 2568769159, 365238920, 336250165, 1283268122, + 3425490969, 248595470, 2305176814, 2111925499, 507770399, 2681111421, 589114268, 591287751, + 1708941527, 4098957707, 475844916, 3378731398, 2452339615, 2817037361, 2678008327, 1656645978, + 2383430340, 73103988, 448667107, 2329420453, 3124020241, 3625235717, 3208634035, 2412059158, + 2981664444, 4117622508, 838560765, 3069470027, 270153238, 1802868219, 3692709886, 2161737865, + 2159912357, 2585798786, 837488486, 4237238160, 2540319504, 3798629246, 3748148874, 1021550776, + 2386715342, 1973637538, 1823520457, 1146713475, 833971519, 3277251466, 905620390, 26278816, + 2680483154, 2294040859, 373297482, 5996609, 4109575006, 512575049, 917036550, 1942311753, + 2816916778, 3248920332, 1192784020, 3537586671, 2456567643, 2925660628, 759380297, 888447942, + 3559939476, 3654687237, 805, +]; +const POW5_12: [u32; 149] = [ + 322166785, 3809044581, 2994556223, 1239584207, 3962455841, 4001882964, 3053876612, 915114683, + 2783289745, 785739093, 4253185907, 3931164994, 1370983858, 2553556126, 3360742076, 2255410929, + 422849554, 2457422215, 3539495362, 1720790602, 1908931983, 1470596141, 592794347, 4219465164, + 4085652704, 941661409, 2534650953, 885063988, 2355909854, 2812815516, 767256131, 3821757683, + 2155151105, 3817418473, 281116564, 2834395026, 2821201622, 2524625843, 1511330880, 2572352493, + 330571332, 2951088579, 2730271766, 4044456479, 4212286644, 2444937588, 3603420843, 2387148597, + 1142537539, 3299235429, 1751012624, 861228086, 2873722519, 230498814, 1023297821, 2553128038, + 3421129895, 2651917435, 2042981258, 1606787143, 2228751918, 447345732, 1930371132, 1784132011, + 3612538790, 2275925090, 2487567871, 1080427616, 2009179183, 3383506781, 3899054063, 1950782960, + 2168622213, 2717674390, 3616636027, 2079341593, 1530129217, 1461057425, 2406264415, 3674671357, + 2972036238, 2019354295, 1455849819, 1866918619, 1324269294, 424891864, 2722422332, 2641594816, + 1400249021, 3482963993, 3734946379, 225889849, 1891545473, 777383150, 3589824633, 4117601611, + 4220028667, 334453379, 1083130821, 1060342180, 4208163139, 1489826908, 4163762246, 1096580926, + 689301528, 2336054516, 1782865703, 4175148410, 3398369392, 2329412588, 3001580596, 59740741, + 3202189932, 3351895776, 246185302, 
718535188, 3772647488, 4151666556, 4055698133, 2461934110, + 2281316281, 3466396836, 3536023465, 1064267812, 2955456354, 2423805422, 3627960790, 1325057500, + 3876919979, 2009959531, 175455101, 184092852, 2358785571, 3842977831, 2485266289, 487121622, + 4159252710, 4075707558, 459389244, 300652075, 2521346588, 3458976673, 888631636, 2076098096, + 3844514585, 2363697580, 3729421522, 3051115477, 649395, +]; +const POW5_13: [u32; 298] = [ + 711442433, 3564261005, 2399042279, 4170849936, 4010295575, 1423987028, 330414929, 1349249065, + 4213813618, 3852031822, 4040843590, 2154565331, 3094013374, 1159028371, 3227065538, 2115927092, + 2085102554, 488590542, 2609619432, 3602898805, 3812736528, 3269439096, 23816114, 253984538, + 1035905997, 2942969204, 3400787671, 338562688, 1637191975, 740509713, 2264962817, 3410753922, + 4162231428, 2282041228, 1759373012, 3155367777, 4278913285, 1420532801, 1981002276, 438054990, + 1006507643, 1142697287, 1332538012, 2029019521, 3949305784, 818392641, 2491288846, 2716584663, + 3648886102, 556814413, 444795339, 4071412999, 1066321706, 4253169466, 2510832316, 672091442, + 4083256000, 2165985028, 1841538484, 3549854235, 364431512, 3707648143, 1162785440, 2268641545, + 281340310, 735693841, 848809228, 1700785200, 2919703985, 4094234344, 58530286, 965505005, + 1000010347, 3381961808, 3040089923, 1973852082, 2890971585, 1019960210, 4292895237, 2821887841, + 3756675650, 3951282907, 3885870583, 1008791145, 503998487, 1881258362, 1949332730, 392996726, + 2012973814, 3970014187, 2461725150, 2942547730, 3728066699, 2766901132, 3778532841, 1085564064, + 2278673896, 1116879805, 3448726271, 774279411, 157211670, 1506320155, 531168605, 1362654525, + 956967721, 2148871960, 769186085, 4186232894, 2055679604, 3248365487, 3981268013, 3975787984, + 2489510517, 3309046495, 212771124, 933418041, 3371839114, 562115198, 1853601831, 757336096, + 1354633440, 1486083256, 2872126393, 522920738, 1141587749, 3210903262, 1926940553, 3054024853, + 2021162538, 2262742000, 1877899947, 3147002868, 669840763, 4158174590, 4238502559, 1023731922, + 3386840011, 829588074, 3449720188, 2835142880, 2999162007, 813056473, 482949569, 638108879, + 3067201471, 1026714238, 4004452838, 2383667807, 3999477803, 771648919, 630660440, 3827121348, + 176185980, 2878191002, 2666149832, 3909811063, 2429163983, 2665690412, 907266128, 4269332098, + 2022665808, 1527122180, 3072053668, 1072477492, 3006022924, 549664855, 2800340954, 37352654, + 1212772743, 2711280533, 3029527946, 2511120040, 1305308377, 3474662224, 4226330922, 442988428, + 954940108, 3274548099, 4212288177, 2688499880, 3982226758, 3922609956, 1279948029, 1939943640, + 3650489901, 2733364929, 2494263275, 1864579964, 1225941120, 2390465139, 1267503249, 3533240729, + 904410805, 2842550015, 2517736241, 1796069820, 3335274381, 673539835, 1924694759, 3598098235, + 2792633405, 16535707, 3703535497, 3592841791, 2929082877, 1317622811, 294990855, 1396706563, + 2383271770, 3853857605, 277813677, 277580220, 1101318484, 3761974115, 1132150143, 2544692622, + 3419825776, 743770306, 1695464553, 1548693232, 2421159615, 2575672031, 2678971806, 1591267897, + 626546738, 3823443129, 267710932, 1455435162, 2353985540, 3248523795, 335348168, 3872552561, + 2814522612, 2634118860, 3503767026, 1301019273, 1414467789, 722985138, 3070909565, 4253482569, + 3744939841, 558142907, 2229819389, 13833173, 77003966, 2763671364, 3905603970, 2931990126, + 2280419384, 1879090457, 2934846267, 4284933164, 2331863845, 62191163, 3178861020, 1522063815, + 785672270, 1215568492, 2936443917, 
802972489, 2956820173, 3916732783, 2893572089, 1391232801, + 3168640330, 2396859648, 894950918, 1103583736, 961991865, 2807302642, 305977505, 3054505899, + 1048256994, 781017659, 2459278754, 3164823415, 537658277, 905753687, 464963300, 4149131560, + 1029507924, 2278300961, 1231291503, 414073408, 3630740085, 2345841814, 475358196, 3258243317, + 4167625072, 4178911231, 2927355042, 655438830, 3138378018, 623200562, 2785714112, 273403236, + 807993669, 98, +]; +const POW5_14: [u32; 595] = [ + 1691320321, 2671006246, 1682531301, 2072858707, 1240508969, 3108358191, 1125119096, 2470144952, + 1610099978, 1690632660, 1941696884, 2663506355, 1006364675, 3909158537, 4147711374, 1072663936, + 4078768933, 745751659, 4123687570, 471458681, 655028926, 4113407388, 3945524552, 985625313, + 1254424514, 2127508744, 570530434, 945388122, 3194649404, 2589065070, 2731705399, 202030749, + 2090780394, 3348662271, 1481754777, 1130635472, 4025144705, 1924486271, 2578567861, 125491448, + 1558036315, 994248173, 3817216711, 763950077, 1030439870, 959586474, 3845661701, 483795093, + 1637944470, 2275463649, 3398804829, 1758016486, 2665513698, 2004912571, 1094885097, 4223064276, + 3307819021, 651121777, 1757003305, 3603542336, 129917786, 2215974994, 3042386306, 2205352757, + 3944939700, 3710987569, 97967515, 1217242524, 930630949, 3660328512, 1787663098, 1784141600, + 2500542892, 4034561586, 3444961378, 785043562, 3869499367, 885623728, 2625011087, 3053789617, + 1965731793, 3900511934, 2648823592, 3851062028, 3321968688, 799195417, 1011847510, 1369129160, + 1348009103, 2876796955, 2915408967, 3305284948, 263399535, 1715990604, 2645821294, 1587844552, + 2624912049, 3035631499, 2306636348, 3499275462, 675152704, 854794152, 4004972748, 1739996642, + 1333476491, 4012621867, 3658792931, 3297985728, 2864481726, 3066357406, 785287846, 1671499798, + 433044045, 1919608025, 264833858, 3999983367, 1116778570, 1301982149, 4213901070, 4081649357, + 536169226, 1389008649, 188923873, 373495152, 2551132278, 1800758715, 3951840330, 2632334454, + 3118778225, 1034046547, 1862428410, 3037609062, 1994608505, 29051798, 2571685694, 264151332, + 2260643090, 2717535964, 3508441116, 3283713017, 1903365635, 923575694, 1219598101, 2288281570, + 3676533911, 1014136356, 555142354, 2389170030, 4185108175, 884862419, 836141292, 2957159173, + 1997444768, 4233903127, 2876184692, 3089125070, 1480848293, 1097600237, 299700527, 2507669891, + 2982628312, 2114881043, 2529576251, 2812279824, 2987750993, 4241938954, 2204775591, 1037094060, + 829315638, 1231047149, 52608178, 3735136637, 3455232602, 962039123, 488286513, 50685385, + 3516451821, 843975207, 1572355722, 675489076, 2428445672, 1555117248, 3708476086, 10375249, + 4172112346, 2117510871, 2227658327, 3187664554, 3050656558, 328034318, 3179601324, 1247769761, + 3439263953, 1431538938, 2962525068, 1213366289, 3813013550, 2651093719, 1860661503, 3933716208, + 264320617, 789980519, 2257856172, 102000748, 977269860, 1113845122, 3008928583, 1461738106, + 557786285, 2926560363, 1038106190, 3643478847, 828004507, 457818698, 1933056971, 373408056, + 2076808229, 3160935130, 2781854874, 2519636100, 177606000, 4237103862, 3977834316, 1621936232, + 2599050516, 319893558, 3343370366, 765044144, 976657331, 7026264, 294277429, 3829376742, + 3029627280, 2705178718, 3614653880, 230519152, 3288033233, 293525479, 3805751881, 3227511198, + 2520308544, 3648103003, 1111086184, 437622105, 2232033852, 3239146386, 584244184, 1450926016, + 2462430443, 3226534010, 298582169, 4214576928, 1762099469, 964985185, 1585788148, 
1641127666, + 787006566, 2315956284, 3258232694, 2275058964, 2541003317, 1508235863, 2613339827, 4080647514, + 1152057965, 3149266279, 731345410, 914737650, 65395712, 1884566942, 1379520432, 2611027720, + 4163073378, 2619704967, 2746552541, 1388822415, 3005141199, 843440249, 4288674003, 3136174279, + 4051522914, 4144149433, 3427566947, 3419023197, 3758479825, 3893877676, 96899594, 1657725776, + 253618880, 434129337, 1499045748, 2996992534, 4036042074, 2110713869, 906222950, 928326225, + 2541827893, 1604330202, 226792470, 4022228930, 815850898, 1466012310, 3377712199, 292769859, + 2822055597, 3225701344, 3052947004, 385831222, 705324593, 4030158636, 3540280538, 2982120874, + 2136414455, 255762046, 3852783591, 3262064164, 2358991588, 3756586117, 4143612643, 3326743817, + 2897365738, 807711264, 3719310016, 3721264861, 3627337076, 944539331, 3640975513, 3712525681, + 1162911839, 2008243316, 2179489649, 2867584109, 261861553, 3570253908, 2062868357, 2220328623, + 3857004679, 3744109002, 4138041873, 1451860932, 2364975637, 2802161722, 2680106834, 753401584, + 1223182946, 1245401957, 4163377735, 3565815922, 2216942838, 4036140094, 71979081, 3924559643, + 400477238, 551750683, 1174153235, 859969898, 1185921017, 1711399735, 812991545, 4051735761, + 3549118738, 1631653329, 3631835958, 3648867800, 1206500363, 2155893137, 361030362, 3454286017, + 2505909489, 1083595169, 453595313, 1510564703, 1706163902, 1632924345, 1381875722, 1661526119, + 1082778324, 3571910052, 1140625929, 851544870, 1145546234, 2938573139, 907528924, 1304752338, + 1764668294, 1788942063, 1700368828, 104979467, 1413911959, 3327497828, 1956384744, 1272712474, + 2815637534, 3307809377, 1320574940, 1111968962, 4073107827, 434096622, 169451929, 3201183459, + 3331028877, 2852366972, 3369830128, 2924794558, 3106537952, 3739481231, 1612955817, 4138608722, + 2721281595, 2755775390, 843505117, 982234295, 1157276611, 814674632, 4246504726, 3532006708, + 992340967, 1647538031, 204696133, 193866982, 3899126129, 300851698, 1379496684, 1759463683, + 1354782756, 1374637239, 3410883240, 1073406229, 3038431791, 1053909855, 3607043270, 173719711, + 3733903830, 171820911, 1573050589, 932781534, 4183534770, 2158849555, 372245998, 3573073830, + 841339264, 2759200520, 1610547277, 2603293319, 3890906486, 1557138278, 3964109906, 677238797, + 537994297, 1124184993, 4287078344, 4207654540, 2943022776, 2977947524, 3255359985, 4098397558, + 2274666217, 2915862060, 243524940, 2467726756, 2869020032, 507521339, 3403121914, 522051455, + 1803903108, 3471254194, 473535371, 1948602036, 3352095732, 3116527002, 1795743673, 775867940, + 2551469548, 3757442064, 3162525227, 3765412747, 3040105484, 1927625810, 48214767, 2997207130, + 1342349989, 2536583992, 1501320191, 3592287317, 887432730, 967585477, 3334212779, 948663609, + 1064513472, 15386372, 2465931737, 3230242590, 3036652803, 2063155087, 1927500726, 2821790499, + 2187774383, 501520074, 3688568496, 3606711121, 2576459247, 3176542345, 378322447, 156541411, + 1400607301, 1406179107, 677848877, 2253753529, 193196070, 4207435024, 4166396241, 509467541, + 2906024136, 1221753746, 3375413222, 431327897, 2749265123, 2848827671, 3412997614, 2051920238, + 1283516885, 1300498239, 1957256104, 2634010560, 3531900395, 360276850, 1461184973, 2012063967, + 2873572430, 2914608609, 4289554777, 1539331673, 1859532928, 4213441063, 538215691, 3512720863, + 4258743698, 3040408445, 982396546, 343095663, 4138069496, 1021581857, 214185242, 1968079460, + 2864275059, 3347192726, 4096783459, 3259169450, 3707808869, 142485006, 399610869, 
230556456, + 2219467721, 4191227798, 2242548189, 3136366572, 179755707, 3464881829, 452317775, 3887426070, + 3446430233, 1473370015, 1576807208, 3964523248, 419325089, 2373067114, 1596072055, 1928415752, + 3635452689, 1005598891, 3335462724, 3290848636, 3669078247, 1178176812, 2110774376, 3068593619, + 1253036518, 908857731, 3631223047, 4138506423, 2903592318, 3596915748, 3289036113, 3721512676, + 2704409359, 3386016968, 3676268074, 2185259502, 1096257611, 3360076717, 3548676554, 170167319, + 3360064287, 3899940843, 9640, +]; + +pub(crate) const POW5: [&'static [u32]; 14] = [ + &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10, + &POW5_11, &POW5_12, &POW5_13, &POW5_14, +]; diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/large_powers64.rs cargo-0.47.0/vendor/serde_json/src/lexical/large_powers64.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/large_powers64.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/large_powers64.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,625 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Precalculated large powers for 64-bit limbs. + +/// Large powers (&[u64]) for base5 operations. +const POW5_1: [u64; 1] = [5]; +const POW5_2: [u64; 1] = [25]; +const POW5_3: [u64; 1] = [625]; +const POW5_4: [u64; 1] = [390625]; +const POW5_5: [u64; 1] = [152587890625]; +const POW5_6: [u64; 2] = [3273344365508751233, 1262]; +const POW5_7: [u64; 3] = [7942358959831785217, 16807427164405733357, 1593091]; +const POW5_8: [u64; 5] = [ + 279109966635548161, + 2554917779393558781, + 14124656261812188652, + 11976055582626787546, + 2537941837315, +]; +const POW5_9: [u64; 10] = [ + 13750482914757213185, + 1302999927698857842, + 14936872543252795590, + 2788415840139466767, + 2095640732773017264, + 7205570348933370714, + 7348167152523113408, + 9285516396840364274, + 6907659600622710236, + 349175, +]; +const POW5_10: [u64; 19] = [ + 8643096425819600897, + 6743743997439985372, + 14059704609098336919, + 10729359125898331411, + 4933048501514368705, + 12258131603170554683, + 2172371001088594721, + 13569903330219142946, + 13809142207969578845, + 16716360519037769646, + 9631256923806107285, + 12866941232305103710, + 1397931361048440292, + 7619627737732970332, + 12725409486282665900, + 11703051443360963910, + 9947078370803086083, + 13966287901448440471, + 121923442132, +]; +const POW5_11: [u64; 38] = [ + 17679772531488845825, + 2216509366347768155, + 1568689219195129479, + 5511594616325588277, + 1067709417009240089, + 9070650952098657518, + 11515285870634858015, + 2539561553659505564, + 17604889300961091799, + 14511540856854204724, + 12099083339557485471, + 7115240299237943815, + 313979240050606788, + 10004784664717172195, + 15570268847930131473, + 10359715202835930803, + 17685054012115162812, + 13183273382855797757, + 7743260039872919062, + 9284593436392572926, + 11105921222066415013, + 18198799323400703846, + 16314988383739458320, + 4387527177871570570, + 8476708682254672590, + 4925096874831034057, + 14075687868072027455, + 112866656203221926, + 9852830467773230418, + 25755239915196746, + 2201493076310172510, + 8342165458688466438, + 13954006576066379050, + 15193819059903295636, + 12565616718911389531, + 3815854855847885129, + 15696762163583540628, + 805, +]; +const POW5_12: [u64; 75] = [ + 16359721904723189761, + 5323973632697650495, + 17187956456762001185, + 3930387638628283780, + 3374723710406992273, + 16884225088663222131, + 10967440051041439154, + 9686916182456720060, + 
10554548046311730194, + 7390739362393647554, + 6316162333127736719, + 18122464886584070891, + 4044404959645932768, + 3801320885861987401, + 12080950653257274590, + 16414324262488991299, + 16395687498836410113, + 12173633940896186260, + 10843185433142632150, + 11048169832730399808, + 12674828934734683716, + 17370808310130582550, + 10500926985433408692, + 10252725158410704555, + 14170108270502067523, + 3698946465517688080, + 989984870770509463, + 10965601426733943069, + 11389898658438335655, + 6901098232861256586, + 1921335291173932590, + 7662788640922083388, + 9775023833308395430, + 4640401278902814207, + 14532050972198413359, + 8378549018693130223, + 11672322628395371653, + 8930704142764178555, + 6275193859483102017, + 15782593304269205087, + 8673060659034172558, + 8018354414354334043, + 1824896661540749038, + 11345563346725559868, + 14959216444480821949, + 970189517688324683, + 3338835207603007873, + 17684964260791738489, + 1436466329061721851, + 4554134986752476101, + 6398757850768963907, + 4709779218751158342, + 10033277748582410264, + 17932125878679265063, + 10004750887749091440, + 256584531835386932, + 14396282740722731628, + 3086085133731396950, + 17831272085689600064, + 10573926491412564693, + 14888061047859191737, + 4570995450261499817, + 10410165022312935266, + 5691078631447480790, + 8632710455805418155, + 790672778942823293, + 16505464105756800547, + 2092171438149740401, + 17505030673829275878, + 1291290830058928444, + 14856191690683232796, + 8916773426496500052, + 10152003807578858265, + 13104441193763861714, + 649395, +]; +const POW5_13: [u64; 149] = [ + 15308384451594534913, + 17913664074042735335, + 6115977719198531863, + 5794980608663993169, + 16544350702855106930, + 9253787637781258566, + 4977988951675168190, + 9087837664087448770, + 2098480401110016986, + 15474332540882100712, + 14042133997396540944, + 1090855284423485362, + 12639956485351058381, + 1454115676006639319, + 3180465001342538023, + 14649076551958697729, + 9801292446545910916, + 13552201410826594004, + 6101141927469189381, + 1881431857880609316, + 4907847477899433595, + 8714572486973123228, + 3514969632331374520, + 11667642286891470094, + 2391499697425323350, + 17486585679659076043, + 18267223761882105642, + 2886610765822313148, + 9302834862968900288, + 15246507846733637044, + 15924227519624562840, + 9743741243284697760, + 3159780987244964246, + 7304816812369628428, + 17584602612559717809, + 4146812420657846766, + 14525415362681041515, + 8477630142371600195, + 4380695748062263745, + 12119915994367943173, + 16970630866565485122, + 4332724980155264503, + 8079943140620527639, + 1687908087554405626, + 17051081099834002166, + 12638146269730763230, + 11883749876933445771, + 4662462156371383785, + 4796962238316531176, + 3325504751659868927, + 6469595803187862550, + 5852556621152583005, + 9229334792448387881, + 17979733373938620709, + 13951623534175792756, + 17075879371091039277, + 14212246479457938037, + 4008999959804158260, + 2414266395366403722, + 3252733766253918247, + 6382678985007829216, + 2245927470982310841, + 13790724502051307301, + 13116936866733148041, + 9718402891306794538, + 13516274400356104875, + 17859223875778049403, + 4396895129099725471, + 3563053650368467915, + 12176845952536972668, + 3492050964335269015, + 2740656767075170753, + 4409704077614761919, + 10237775279597492710, + 3314206875098230827, + 16437361028114095448, + 12361736225407656572, + 16792510651790145480, + 11449053143229929935, + 18336641737580333136, + 6558939822118891088, + 4606255756908155300, + 2360792578991605004, + 
160428430149144538, + 11644861220729221511, + 10785178451159739786, + 14923560618031934681, + 1902620814992781610, + 14064076995338910412, + 11547019064112212657, + 16847481479966225734, + 8331994491163145469, + 11739712981738851885, + 8008309968651120619, + 10266969595459035264, + 15175153381217702033, + 12208659352573720245, + 7714061140750342961, + 2892831567213510541, + 15453714249045017319, + 71020323573871677, + 15431137995750602633, + 5659146884637671933, + 5998809010488554503, + 16552192379299157850, + 1192197967194298797, + 16157555793424861524, + 10929371590994640255, + 3194469143425738352, + 6651586784672005225, + 11062427140788057791, + 6834443579468668318, + 16421563197797455922, + 6251046422506172884, + 13952303462156793860, + 16632486601871393224, + 11313454360291325172, + 5587835232504462834, + 3105197524618514637, + 18268568531031972989, + 2397205535804309313, + 59413027864729597, + 11869878125348715710, + 12592801707270523266, + 8070632061321113656, + 18403647807860650811, + 267109013517069093, + 6537214311028855260, + 5220826919973709902, + 3448740582779163661, + 16822239213112884941, + 5975299384311048185, + 10294433804430712138, + 4739856055412448774, + 12057273038326387897, + 13119002941950056609, + 3354445304051737058, + 13592813067499314594, + 3890182464434078629, + 17820384357466425060, + 9785228118969879380, + 1778431746734556271, + 10075313876350055029, + 13994048489400919028, + 17948287074199726448, + 2815088342305858722, + 2676626035777198370, + 1174257960026283968, + 421714788677, +]; +const POW5_14: [u64; 298] = [ + 11471884475673051137, + 8902860357476377573, + 13350296775839230505, + 10609191786344608888, + 7261211985859587338, + 11439672689354862964, + 16789708072300570627, + 4607056528866348430, + 3202978990421512997, + 2024899620433984146, + 17666950207239811774, + 4233228489390288200, + 9137580478688460738, + 4060411066587388546, + 11119949806060600124, + 867715462473090103, + 14382394941384869610, + 4856042377419278489, + 8265605599571137921, + 538981667666252469, + 4270263388700786523, + 3281140600308898503, + 4121392524544394174, + 2077884106245940229, + 9773041957329767574, + 7550623316597646685, + 8611033926449791714, + 18137922955420802793, + 2796546741236224013, + 15477096484628446761, + 9517540128113714010, + 9471917970500821378, + 15938570248662483124, + 5228016831978462619, + 15720991252586974501, + 7662829825220776698, + 17328310068068434348, + 3371736428170309730, + 3803724952191098855, + 13115926536504376719, + 16752571196153442257, + 16540185467776259880, + 3432518182450051120, + 5880364967211798870, + 12355748840305392783, + 14196090758536469575, + 7370123524686686319, + 6819740424617592686, + 13037938013537368753, + 15029273671291927100, + 3671312928327205696, + 7473228676544792780, + 17234079691312938123, + 14164740848093544419, + 13169904779481875902, + 7179036968465894054, + 8244653688947194445, + 17179797746073799490, + 5591970751047577674, + 17530550506268329742, + 5965746721852312330, + 1604149463243472865, + 7734199791463116918, + 11305790396015856714, + 4441196105025505137, + 13046431581185664762, + 124776524294606713, + 1134521334706523966, + 11671728093344476434, + 14103440020972933148, + 3966727403013869059, + 9828094508409132821, + 4355682486381147287, + 10261407143988481234, + 3800455155249557199, + 12700901937937547500, + 18184475466894579360, + 13267691151779895412, + 4714157123477697445, + 10770360171308585263, + 9083344917597998040, + 12078649873810212155, + 18218989082046199377, + 4454285072780637351, + 
5287307245618354742, + 16042289702059031730, + 4131926574212754010, + 217692071448455473, + 3624845916216282093, + 2901203491797614218, + 6679177724033967080, + 44561358851332790, + 9094639944041587162, + 13690915012276084311, + 1408896670826320686, + 5359130319612337580, + 6148412925099835601, + 5211368532286409612, + 11386360825549027374, + 16895182466965795071, + 3392940493846427241, + 438089879085393580, + 4783928372776399972, + 6278117363595909959, + 12569481049412674733, + 15648622492570893902, + 1966316336235305115, + 1603775390515993547, + 13576113010204316709, + 10821754650102840474, + 18198222517222903152, + 6966163076615302988, + 1373932372410129684, + 3285839581819684990, + 30177575069719475, + 16447047871247307061, + 11618654126674833808, + 990072222556306872, + 1260682336135768017, + 13862055046689532489, + 15668483092844698432, + 1879572630092764264, + 13912027797058626108, + 6231679788219816920, + 13857858054844167403, + 18101470072534728857, + 4144579812461609229, + 7048589655616599284, + 9946956499532694630, + 9771303850109874038, + 6477823708780339765, + 17526247621747041971, + 13525995675852669549, + 3928768291901239810, + 8094153383078124544, + 11214278667728965552, + 11251547162596832610, + 5964946855123292381, + 3622548288590237903, + 13469765967150053587, + 17798986288523466082, + 14684592818807932259, + 16724077276802963921, + 7119877993753121290, + 1864571304902781632, + 12871984921385213812, + 9065447042604670298, + 3987130777300360550, + 6890545752116901685, + 17275341711601865750, + 6296474927799264658, + 1257436973037243463, + 13854281781965301421, + 1657132483318662716, + 17309399540017292849, + 12808111630089217242, + 1098489625264462071, + 14010458905686364135, + 16134414519481621220, + 14288255900328821475, + 3469093466388187882, + 15982710881468295872, + 4056765540058056052, + 15945176389096104089, + 8625339365793505375, + 12316179968863788913, + 15334123773538054321, + 9536238824220581765, + 16080825720106203271, + 6235695225418121745, + 12035192956458019349, + 3235835166714703698, + 5348960676912581218, + 15315062772709464647, + 17335089708021308662, + 16855855317958414409, + 2369751139431140406, + 3693542588628609043, + 7350405893393987577, + 17402072586341663801, + 7007897690013647122, + 15671767872059304758, + 9259490518292347915, + 14836045474406130394, + 4654005815464502513, + 6487825998330548401, + 7013356660323385022, + 7136200343936679946, + 15341236858676437716, + 3657357368867197449, + 12621075530054608378, + 5603868621997066972, + 7683447656788439942, + 450883379216880060, + 14291494350184945047, + 5466258454997635048, + 14206933098432772126, + 4775870327277641692, + 1864430798867181939, + 13748978265070608793, + 12250822864261576589, + 12561896977498605296, + 16060949594257359328, + 17775189113543311529, + 11835965177892927035, + 4218664174878121437, + 3499000902478111683, + 15169853304359126294, + 7076121963053575143, + 832652347668916805, + 1292148207755194737, + 7556838978364207852, + 5904021986723518500, + 4610244652288570024, + 4526508363195533871, + 746120481022614726, + 737965197247830486, + 4006266184415762653, + 9272188239892688050, + 15346235246415709678, + 11850675997347533184, + 11181059668610842701, + 6687857983250662774, + 2908718488661492818, + 4828337780126983225, + 18071738646453002184, + 12790187227727197880, + 17602483480871623153, + 12523532189621855977, + 10598805712727696716, + 2179787555896149376, + 2242193929457337594, + 14908923241136742532, + 8369182018012550027, + 13385381554043022324, + 3332327430110633913, + 
16138090784046208492, + 16172324607469047339, + 8279089815915615244, + 12872906602736235247, + 10894545290539475621, + 15428756545851905023, + 4155747980686992922, + 4074479178894544043, + 66083965608603584, + 13873786284662268377, + 8861183628277687555, + 12119497911296021430, + 2154012318305274287, + 15490706314503067312, + 13643145488710608367, + 672340241093017103, + 6039493278284091973, + 9679797700977436461, + 18070795828318171174, + 2188146431134935377, + 5247392385741514952, + 1852539214842869734, + 12235621681634112739, + 8812930319623534062, + 5585597406294108629, + 11312989214475901864, + 1547377291787797995, + 8641748937186208205, + 12518148659168623694, + 6611379197521520985, + 18096591571068008576, + 15087021227100112139, + 13058454842015958418, + 1473584652966833794, + 4387660670140018168, + 8452836916843525402, + 14376083294443363955, + 13998026203969090659, + 611968444648172645, + 990232438801273845, + 18001186324715561929, + 13470591857250177501, + 14881554140239420091, + 16696367836720124495, + 6328076032778459673, + 17027497695968504616, + 10192245646262428833, + 8282482589527318647, + 4319014353374321425, + 14134087271041670980, + 5060230880114618599, + 13179509240430058600, + 3903514232614801894, + 17774749744702165255, + 15448635507030969726, + 15983775238358480209, + 14542832143965487887, + 9385618098039514666, + 14431419612662304843, + 730863073501675978, + 16750118380379734815, + 9640, +]; + +pub(crate) const POW5: [&[u64]; 14] = [ + &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10, + &POW5_11, &POW5_12, &POW5_13, &POW5_14, +]; diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/large_powers.rs cargo-0.47.0/vendor/serde_json/src/lexical/large_powers.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/large_powers.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/large_powers.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,9 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Precalculated large powers for limbs. + +#[cfg(limb_width_32)] +pub(crate) use super::large_powers32::*; + +#[cfg(limb_width_64)] +pub(crate) use super::large_powers64::*; diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/math.rs cargo-0.47.0/vendor/serde_json/src/lexical/math.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/math.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/math.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,885 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Building-blocks for arbitrary-precision math. +//! +//! These algorithms assume little-endian order for the large integer +//! buffers, so for a `vec![0, 1, 2, 3]`, `3` is the most significant limb, +//! and `0` is the least significant limb. + +use super::large_powers; +use super::num::*; +use super::small_powers::*; +use crate::lib::{cmp, iter, mem, Vec}; + +// ALIASES +// ------- + +// Type for a single limb of the big integer. +// +// A limb is analogous to a digit in base10, except, it stores 32-bit +// or 64-bit numbers instead. +// +// This should be all-known 64-bit platforms supported by Rust. +// https://forge.rust-lang.org/platform-support.html +// +// Platforms where native 128-bit multiplication is explicitly supported: +// - x86_64 (Supported via `MUL`). +// - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from). 
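// Illustrative, self-contained sketch (not part of the patch): what a "limb"
// means in this module. The big integer is a little-endian Vec of machine-word
// digits, so on a 32-bit-limb build a u64 value splits into two limbs with the
// least significant limb first. The helper name below is hypothetical.
fn split_u64_into_u32_limbs(x: u64) -> [u32; 2] {
    [x as u32, (x >> 32) as u32]
}

fn main() {
    // Low limb 0x89AB_CDEF comes first, high limb 0x0123_4567 second.
    assert_eq!(
        split_u64_into_u32_limbs(0x0123_4567_89AB_CDEF),
        [0x89AB_CDEF, 0x0123_4567]
    );
}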
+// +// Platforms where native 64-bit multiplication is supported and +// you can extract hi-lo for 64-bit multiplications. +// aarch64 (Requires `UMULH` and `MUL` to capture high and low bits). +// powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits). +// +// Platforms where native 128-bit multiplication is not supported, +// requiring software emulation. +// sparc64 (`UMUL` only supported double-word arguments). + +// 32-BIT LIMB +#[cfg(limb_width_32)] +pub type Limb = u32; + +#[cfg(limb_width_32)] +pub const POW5_LIMB: &[Limb] = &POW5_32; + +#[cfg(limb_width_32)] +pub const POW10_LIMB: &[Limb] = &POW10_32; + +#[cfg(limb_width_32)] +type Wide = u64; + +// 64-BIT LIMB +#[cfg(limb_width_64)] +pub type Limb = u64; + +#[cfg(limb_width_64)] +pub const POW5_LIMB: &[Limb] = &POW5_64; + +#[cfg(limb_width_64)] +pub const POW10_LIMB: &[Limb] = &POW10_64; + +#[cfg(limb_width_64)] +type Wide = u128; + +/// Cast to limb type. +#[inline] +pub(crate) fn as_limb(t: T) -> Limb { + Limb::as_cast(t) +} + +/// Cast to wide type. +#[inline] +fn as_wide(t: T) -> Wide { + Wide::as_cast(t) +} + +// SPLIT +// ----- + +/// Split u64 into limbs, in little-endian order. +#[inline] +#[cfg(limb_width_32)] +fn split_u64(x: u64) -> [Limb; 2] { + [as_limb(x), as_limb(x >> 32)] +} + +/// Split u64 into limbs, in little-endian order. +#[inline] +#[cfg(limb_width_64)] +fn split_u64(x: u64) -> [Limb; 1] { + [as_limb(x)] +} + +// HI64 +// ---- + +// NONZERO + +/// Check if any of the remaining bits are non-zero. +#[inline] +pub fn nonzero(x: &[T], rindex: usize) -> bool { + let len = x.len(); + let slc = &x[..len - rindex]; + slc.iter().rev().any(|&x| x != T::ZERO) +} + +/// Shift 64-bit integer to high 64-bits. +#[inline] +fn u64_to_hi64_1(r0: u64) -> (u64, bool) { + debug_assert!(r0 != 0); + let ls = r0.leading_zeros(); + (r0 << ls, false) +} + +/// Shift 2 64-bit integers to high 64-bits. +#[inline] +fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { + debug_assert!(r0 != 0); + let ls = r0.leading_zeros(); + let rs = 64 - ls; + let v = match ls { + 0 => r0, + _ => (r0 << ls) | (r1 >> rs), + }; + let n = r1 << ls != 0; + (v, n) +} + +/// Trait to export the high 64-bits from a little-endian slice. +trait Hi64: AsRef<[T]> { + /// Get the hi64 bits from a 1-limb slice. + fn hi64_1(&self) -> (u64, bool); + + /// Get the hi64 bits from a 2-limb slice. + fn hi64_2(&self) -> (u64, bool); + + /// Get the hi64 bits from a 3-limb slice. + fn hi64_3(&self) -> (u64, bool); + + /// High-level exporter to extract the high 64 bits from a little-endian slice. 
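// Sketch of the `hi64` normalization in the single-limb case, mirroring
// `u64_to_hi64_1` above: shift the limb so its most significant bit is set,
// and report whether any bits below the kept 64 were non-zero (that flag
// later feeds the rounding decision).
fn hi64_one_limb(r0: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    (r0 << r0.leading_zeros(), false)
}

fn main() {
    // 0b101 is moved up to the top three bits; nothing was truncated.
    assert_eq!(hi64_one_limb(0b101), (0b101 << 61, false));
}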
+ #[inline] + fn hi64(&self) -> (u64, bool) { + match self.as_ref().len() { + 0 => (0, false), + 1 => self.hi64_1(), + 2 => self.hi64_2(), + _ => self.hi64_3(), + } + } +} + +impl Hi64 for [u32] { + #[inline] + fn hi64_1(&self) -> (u64, bool) { + debug_assert!(self.len() == 1); + let r0 = self[0] as u64; + u64_to_hi64_1(r0) + } + + #[inline] + fn hi64_2(&self) -> (u64, bool) { + debug_assert!(self.len() == 2); + let r0 = (self[1] as u64) << 32; + let r1 = self[0] as u64; + u64_to_hi64_1(r0 | r1) + } + + #[inline] + fn hi64_3(&self) -> (u64, bool) { + debug_assert!(self.len() >= 3); + let r0 = self[self.len() - 1] as u64; + let r1 = (self[self.len() - 2] as u64) << 32; + let r2 = self[self.len() - 3] as u64; + let (v, n) = u64_to_hi64_2(r0, r1 | r2); + (v, n || nonzero(self, 3)) + } +} + +impl Hi64 for [u64] { + #[inline] + fn hi64_1(&self) -> (u64, bool) { + debug_assert!(self.len() == 1); + let r0 = self[0]; + u64_to_hi64_1(r0) + } + + #[inline] + fn hi64_2(&self) -> (u64, bool) { + debug_assert!(self.len() >= 2); + let r0 = self[self.len() - 1]; + let r1 = self[self.len() - 2]; + let (v, n) = u64_to_hi64_2(r0, r1); + (v, n || nonzero(self, 2)) + } + + #[inline] + fn hi64_3(&self) -> (u64, bool) { + self.hi64_2() + } +} + +// SCALAR +// ------ + +// Scalar-to-scalar operations, for building-blocks for arbitrary-precision +// operations. + +mod scalar { + use super::*; + + // ADDITION + + /// Add two small integers and return the resulting value and if overflow happens. + #[inline] + pub fn add(x: Limb, y: Limb) -> (Limb, bool) { + x.overflowing_add(y) + } + + /// AddAssign two small integers and return if overflow happens. + #[inline] + pub fn iadd(x: &mut Limb, y: Limb) -> bool { + let t = add(*x, y); + *x = t.0; + t.1 + } + + // SUBTRACTION + + /// Subtract two small integers and return the resulting value and if overflow happens. + #[inline] + pub fn sub(x: Limb, y: Limb) -> (Limb, bool) { + x.overflowing_sub(y) + } + + /// SubAssign two small integers and return if overflow happens. + #[inline] + pub fn isub(x: &mut Limb, y: Limb) -> bool { + let t = sub(*x, y); + *x = t.0; + t.1 + } + + // MULTIPLICATION + + /// Multiply two small integers (with carry) (and return the overflow contribution). + /// + /// Returns the (low, high) components. + #[inline] + pub fn mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) { + // Cannot overflow, as long as wide is 2x as wide. This is because + // the following is always true: + // `Wide::max_value() - (Narrow::max_value() * Narrow::max_value()) >= Narrow::max_value()` + let z: Wide = as_wide(x) * as_wide(y) + as_wide(carry); + let bits = mem::size_of::() * 8; + (as_limb(z), as_limb(z >> bits)) + } + + /// Multiply two small integers (with carry) (and return if overflow happens). + #[inline] + pub fn imul(x: &mut Limb, y: Limb, carry: Limb) -> Limb { + let t = mul(*x, y, carry); + *x = t.0; + t.1 + } +} // scalar + +// SMALL +// ----- + +// Large-to-small operations, to modify a big integer from a native scalar. + +mod small { + use super::*; + + // MULTIPLICATIION + + /// ADDITION + + /// Implied AddAssign implementation for adding a small integer to bigint. + /// + /// Allows us to choose a start-index in x to store, to allow incrementing + /// from a non-zero start. + #[inline] + pub fn iadd_impl(x: &mut Vec, y: Limb, xstart: usize) { + if x.len() <= xstart { + x.push(y); + } else { + // Initial add + let mut carry = scalar::iadd(&mut x[xstart], y); + + // Increment until overflow stops occurring. 
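// Illustrative sketch (not from the patch) of the carry propagation performed
// by `small::iadd_impl`: add into the first limb, keep adding 1 to higher
// limbs while the addition overflows, and grow the vector if the carry falls
// off the end. Assumes a non-empty limb vector for brevity.
fn iadd_small(x: &mut Vec<u64>, y: u64) {
    let (sum, mut carry) = x[0].overflowing_add(y);
    x[0] = sum;
    let mut i = 1;
    while carry && i < x.len() {
        let (sum, c) = x[i].overflowing_add(1);
        x[i] = sum;
        carry = c;
        i += 1;
    }
    if carry {
        x.push(1);
    }
}

fn main() {
    // u64::MAX + 1 carries into a brand new limb: the result is 2^64.
    let mut x = vec![u64::MAX];
    iadd_small(&mut x, 1);
    assert_eq!(x, vec![0, 1]);
}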
+ let mut size = xstart + 1; + while carry && size < x.len() { + carry = scalar::iadd(&mut x[size], 1); + size += 1; + } + + // If we overflowed the buffer entirely, need to add 1 to the end + // of the buffer. + if carry { + x.push(1); + } + } + } + + /// AddAssign small integer to bigint. + #[inline] + pub fn iadd(x: &mut Vec, y: Limb) { + iadd_impl(x, y, 0); + } + + // SUBTRACTION + + /// SubAssign small integer to bigint. + /// Does not do overflowing subtraction. + #[inline] + pub fn isub_impl(x: &mut Vec, y: Limb, xstart: usize) { + debug_assert!(x.len() > xstart && (x[xstart] >= y || x.len() > xstart + 1)); + + // Initial subtraction + let mut carry = scalar::isub(&mut x[xstart], y); + + // Increment until overflow stops occurring. + let mut size = xstart + 1; + while carry && size < x.len() { + carry = scalar::isub(&mut x[size], 1); + size += 1; + } + normalize(x); + } + + // MULTIPLICATION + + /// MulAssign small integer to bigint. + #[inline] + pub fn imul(x: &mut Vec, y: Limb) { + // Multiply iteratively over all elements, adding the carry each time. + let mut carry: Limb = 0; + for xi in x.iter_mut() { + carry = scalar::imul(xi, y, carry); + } + + // Overflow of value, add to end. + if carry != 0 { + x.push(carry); + } + } + + /// Mul small integer to bigint. + #[inline] + pub fn mul(x: &[Limb], y: Limb) -> Vec { + let mut z = Vec::::default(); + z.extend_from_slice(x); + imul(&mut z, y); + z + } + + /// MulAssign by a power. + /// + /// Theoretically... + /// + /// Use an exponentiation by squaring method, since it reduces the time + /// complexity of the multiplication to ~`O(log(n))` for the squaring, + /// and `O(n*m)` for the result. Since `m` is typically a lower-order + /// factor, this significantly reduces the number of multiplications + /// we need to do. Iteratively multiplying by small powers follows + /// the nth triangular number series, which scales as `O(p^2)`, but + /// where `p` is `n+m`. In short, it scales very poorly. + /// + /// Practically.... + /// + /// Exponentiation by Squaring: + /// running 2 tests + /// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78) + /// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007) + /// + /// Exponentiation by Iterative Small Powers: + /// running 2 tests + /// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31) + /// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47) + /// + /// Exponentiation by Iterative Large Powers (of 2): + /// running 2 tests + /// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31) + /// test bigcomp_f64_lexical ... bench: 1,394 ns/iter (+/- 47) + /// + /// Even using worst-case scenarios, exponentiation by squaring is + /// significantly slower for our workloads. Just multiply by small powers, + /// in simple cases, and use precalculated large powers in other cases. + pub fn imul_pow5(x: &mut Vec, n: u32) { + use super::large::KARATSUBA_CUTOFF; + + let small_powers = POW5_LIMB; + let large_powers = large_powers::POW5; + + if n == 0 { + // No exponent, just return. + // The 0-index of the large powers is `2^0`, which is 1, so we want + // to make sure we don't take that path with a literal 0. + return; + } + + // We want to use the asymptotically faster algorithm if we're going + // to be using Karabatsu multiplication sometime during the result, + // otherwise, just use exponentiation by squaring. 
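// Sketch of the "iterative small powers" strategy that `imul_pow5` takes in
// the easy cases: multiply by the largest precomputed power of 5 that fits in
// a limb until the remaining exponent is small, then finish with one table
// lookup. The tiny table here is only a stand-in for `POW5_64`, and plain
// u128 arithmetic stands in for the big-integer multiply.
const POW5: [u64; 5] = [1, 5, 25, 125, 625];

fn mul_pow5(mut x: u128, mut n: usize) -> u128 {
    let step = POW5.len() - 1;
    while n >= step {
        x *= POW5[step] as u128;
        n -= step;
    }
    x * POW5[n] as u128
}

fn main() {
    assert_eq!(mul_pow5(3, 7), 3 * 5u128.pow(7));
}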
+ let bit_length = 32 - n.leading_zeros() as usize; + debug_assert!(bit_length != 0 && bit_length <= large_powers.len()); + if x.len() + large_powers[bit_length - 1].len() < 2 * KARATSUBA_CUTOFF { + // We can use iterative small powers to make this faster for the + // easy cases. + + // Multiply by the largest small power until n < step. + let step = small_powers.len() - 1; + let power = small_powers[step]; + let mut n = n as usize; + while n >= step { + imul(x, power); + n -= step; + } + + // Multiply by the remainder. + imul(x, small_powers[n]); + } else { + // In theory, this code should be asymptotically a lot faster, + // in practice, our small::imul seems to be the limiting step, + // and large imul is slow as well. + + // Multiply by higher order powers. + let mut idx: usize = 0; + let mut bit: usize = 1; + let mut n = n as usize; + while n != 0 { + if n & bit != 0 { + debug_assert!(idx < large_powers.len()); + large::imul(x, large_powers[idx]); + n ^= bit; + } + idx += 1; + bit <<= 1; + } + } + } + + // BIT LENGTH + + /// Get number of leading zero bits in the storage. + #[inline] + pub fn leading_zeros(x: &[Limb]) -> usize { + x.last().map_or(0, |x| x.leading_zeros() as usize) + } + + /// Calculate the bit-length of the big-integer. + #[inline] + pub fn bit_length(x: &[Limb]) -> usize { + let bits = mem::size_of::() * 8; + // Avoid overflowing, calculate via total number of bits + // minus leading zero bits. + let nlz = leading_zeros(x); + bits.checked_mul(x.len()) + .map_or_else(usize::max_value, |v| v - nlz) + } + + // SHL + + /// Shift-left bits inside a buffer. + /// + /// Assumes `n < Limb::BITS`, IE, internally shifting bits. + #[inline] + pub fn ishl_bits(x: &mut Vec, n: usize) { + // Need to shift by the number of `bits % Limb::BITS)`. + let bits = mem::size_of::() * 8; + debug_assert!(n < bits); + if n == 0 { + return; + } + + // Internally, for each item, we shift left by n, and add the previous + // right shifted limb-bits. + // For example, we transform (for u8) shifted left 2, to: + // b10100100 b01000010 + // b10 b10010001 b00001000 + let rshift = bits - n; + let lshift = n; + let mut prev: Limb = 0; + for xi in x.iter_mut() { + let tmp = *xi; + *xi <<= lshift; + *xi |= prev >> rshift; + prev = tmp; + } + + // Always push the carry, even if it creates a non-normal result. + let carry = prev >> rshift; + if carry != 0 { + x.push(carry); + } + } + + /// Shift-left `n` digits inside a buffer. + /// + /// Assumes `n` is not 0. + #[inline] + pub fn ishl_limbs(x: &mut Vec, n: usize) { + debug_assert!(n != 0); + if !x.is_empty() { + x.reserve(n); + x.splice(..0, iter::repeat(0).take(n)); + } + } + + /// Shift-left buffer by n bits. + #[inline] + pub fn ishl(x: &mut Vec, n: usize) { + let bits = mem::size_of::() * 8; + // Need to pad with zeros for the number of `bits / Limb::BITS`, + // and shift-left with carry for `bits % Limb::BITS`. + let rem = n % bits; + let div = n / bits; + ishl_bits(x, rem); + if div != 0 { + ishl_limbs(x, div); + } + } + + // NORMALIZE + + /// Normalize the container by popping any leading zeros. + #[inline] + pub fn normalize(x: &mut Vec) { + // Remove leading zero if we cause underflow. Since we're dividing + // by a small power, we have at max 1 int removed. + while x.last() == Some(&0) { + x.pop(); + } + } +} // small + +// LARGE +// ----- + +// Large-to-large operations, to modify a big integer from a native scalar. + +mod large { + use super::*; + + // RELATIVE OPERATORS + + /// Compare `x` to `y`, in little-endian order. 
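// Sketch of the intra-limb left shift performed by `ishl_bits`, using u8
// "limbs" purely to keep the bit patterns short: each limb is shifted left
// and receives the bits carried out of the previous (less significant) limb,
// with a final carry limb pushed if needed.
fn shl_bits_u8(x: &mut Vec<u8>, n: usize) {
    assert!(n > 0 && n < 8);
    let mut prev = 0u8;
    for xi in x.iter_mut() {
        let tmp = *xi;
        *xi = (*xi << n) | (prev >> (8 - n));
        prev = tmp;
    }
    let carry = prev >> (8 - n);
    if carry != 0 {
        x.push(carry);
    }
}

fn main() {
    // Little-endian limbs of 0b01000010_10100100, shifted left by 2 bits.
    let mut x = vec![0b1010_0100u8, 0b0100_0010];
    shl_bits_u8(&mut x, 2);
    assert_eq!(x, vec![0b1001_0000, 0b0000_1010, 0b0000_0001]);
}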
+ #[inline] + pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering { + if x.len() > y.len() { + cmp::Ordering::Greater + } else if x.len() < y.len() { + cmp::Ordering::Less + } else { + let iter = x.iter().rev().zip(y.iter().rev()); + for (&xi, &yi) in iter { + if xi > yi { + return cmp::Ordering::Greater; + } else if xi < yi { + return cmp::Ordering::Less; + } + } + // Equal case. + cmp::Ordering::Equal + } + } + + /// Check if x is less than y. + #[inline] + pub fn less(x: &[Limb], y: &[Limb]) -> bool { + compare(x, y) == cmp::Ordering::Less + } + + /// Check if x is greater than or equal to y. + #[inline] + pub fn greater_equal(x: &[Limb], y: &[Limb]) -> bool { + !less(x, y) + } + + // ADDITION + + /// Implied AddAssign implementation for bigints. + /// + /// Allows us to choose a start-index in x to store, so we can avoid + /// padding the buffer with zeros when not needed, optimized for vectors. + pub fn iadd_impl(x: &mut Vec, y: &[Limb], xstart: usize) { + // The effective x buffer is from `xstart..x.len()`, so we need to treat + // that as the current range. If the effective y buffer is longer, need + // to resize to that, + the start index. + if y.len() > x.len() - xstart { + x.resize(y.len() + xstart, 0); + } + + // Iteratively add elements from y to x. + let mut carry = false; + for (xi, yi) in (&mut x[xstart..]).iter_mut().zip(y.iter()) { + // Only one op of the two can overflow, since we added at max + // Limb::max_value() + Limb::max_value(). Add the previous carry, + // and store the current carry for the next. + let mut tmp = scalar::iadd(xi, *yi); + if carry { + tmp |= scalar::iadd(xi, 1); + } + carry = tmp; + } + + // Overflow from the previous bit. + if carry { + small::iadd_impl(x, 1, y.len() + xstart); + } + } + + /// AddAssign bigint to bigint. + #[inline] + pub fn iadd(x: &mut Vec, y: &[Limb]) { + iadd_impl(x, y, 0) + } + + /// Add bigint to bigint. + #[inline] + pub fn add(x: &[Limb], y: &[Limb]) -> Vec { + let mut z = Vec::::default(); + z.extend_from_slice(x); + iadd(&mut z, y); + z + } + + // SUBTRACTION + + /// SubAssign bigint to bigint. + pub fn isub(x: &mut Vec, y: &[Limb]) { + // Basic underflow checks. + debug_assert!(greater_equal(x, y)); + + // Iteratively add elements from y to x. + let mut carry = false; + for (xi, yi) in x.iter_mut().zip(y.iter()) { + // Only one op of the two can overflow, since we added at max + // Limb::max_value() + Limb::max_value(). Add the previous carry, + // and store the current carry for the next. + let mut tmp = scalar::isub(xi, *yi); + if carry { + tmp |= scalar::isub(xi, 1); + } + carry = tmp; + } + + if carry { + small::isub_impl(x, 1, y.len()); + } else { + small::normalize(x); + } + } + + // MULTIPLICATION + + /// Number of digits to bottom-out to asymptotically slow algorithms. + /// + /// Karatsuba tends to out-perform long-multiplication at ~320-640 bits, + /// so we go halfway, while Newton division tends to out-perform + /// Algorithm D at ~1024 bits. We can toggle this for optimal performance. + pub const KARATSUBA_CUTOFF: usize = 32; + + /// Grade-school multiplication algorithm. + /// + /// Slow, naive algorithm, using limb-bit bases and just shifting left for + /// each iteration. This could be optimized with numerous other algorithms, + /// but it's extremely simple, and works in O(n*m) time, which is fine + /// by me. Each iteration, of which there are `m` iterations, requires + /// `n` multiplications, and `n` additions, or grade-school multiplication. 
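// Sketch of comparing two normalized little-endian bigints, as in
// `large::compare`: with no leading zero limbs, a longer vector is strictly
// larger; otherwise limbs are compared from the most significant end down.
use std::cmp::Ordering;

fn compare(x: &[u64], y: &[u64]) -> Ordering {
    x.len()
        .cmp(&y.len())
        .then_with(|| x.iter().rev().cmp(y.iter().rev()))
}

fn main() {
    // 2^64 (limbs [0, 1]) is greater than u64::MAX (single limb).
    assert_eq!(compare(&[0, 1], &[u64::MAX]), Ordering::Greater);
    assert_eq!(compare(&[5, 1], &[3, 1]), Ordering::Greater);
}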
+ fn long_mul(x: &[Limb], y: &[Limb]) -> Vec { + // Using the immutable value, multiply by all the scalars in y, using + // the algorithm defined above. Use a single buffer to avoid + // frequent reallocations. Handle the first case to avoid a redundant + // addition, since we know y.len() >= 1. + let mut z: Vec = small::mul(x, y[0]); + z.resize(x.len() + y.len(), 0); + + // Handle the iterative cases. + for (i, &yi) in y[1..].iter().enumerate() { + let zi: Vec = small::mul(x, yi); + iadd_impl(&mut z, &zi, i + 1); + } + + small::normalize(&mut z); + + z + } + + /// Split two buffers into halfway, into (lo, hi). + #[inline] + pub fn karatsuba_split(z: &[Limb], m: usize) -> (&[Limb], &[Limb]) { + (&z[..m], &z[m..]) + } + + /// Karatsuba multiplication algorithm with roughly equal input sizes. + /// + /// Assumes `y.len() >= x.len()`. + fn karatsuba_mul(x: &[Limb], y: &[Limb]) -> Vec { + if y.len() <= KARATSUBA_CUTOFF { + // Bottom-out to long division for small cases. + long_mul(x, y) + } else if x.len() < y.len() / 2 { + karatsuba_uneven_mul(x, y) + } else { + // Do our 3 multiplications. + let m = y.len() / 2; + let (xl, xh) = karatsuba_split(x, m); + let (yl, yh) = karatsuba_split(y, m); + let sumx = add(xl, xh); + let sumy = add(yl, yh); + let z0 = karatsuba_mul(xl, yl); + let mut z1 = karatsuba_mul(&sumx, &sumy); + let z2 = karatsuba_mul(xh, yh); + // Properly scale z1, which is `z1 - z2 - zo`. + isub(&mut z1, &z2); + isub(&mut z1, &z0); + + // Create our result, which is equal to, in little-endian order: + // [z0, z1 - z2 - z0, z2] + // z1 must be shifted m digits (2^(32m)) over. + // z2 must be shifted 2*m digits (2^(64m)) over. + let len = z0.len().max(m + z1.len()).max(2 * m + z2.len()); + let mut result = z0; + result.reserve_exact(len - result.len()); + iadd_impl(&mut result, &z1, m); + iadd_impl(&mut result, &z2, 2 * m); + + result + } + } + + /// Karatsuba multiplication algorithm where y is substantially larger than x. + /// + /// Assumes `y.len() >= x.len()`. + fn karatsuba_uneven_mul(x: &[Limb], mut y: &[Limb]) -> Vec { + let mut result = Vec::::default(); + result.resize(x.len() + y.len(), 0); + + // This effectively is like grade-school multiplication between + // two numbers, except we're using splits on `y`, and the intermediate + // step is a Karatsuba multiplication. + let mut start = 0; + while !y.is_empty() { + let m = x.len().min(y.len()); + let (yl, yh) = karatsuba_split(y, m); + let prod = karatsuba_mul(x, yl); + iadd_impl(&mut result, &prod, start); + y = yh; + start += m; + } + small::normalize(&mut result); + + result + } + + /// Forwarder to the proper Karatsuba algorithm. + #[inline] + fn karatsuba_mul_fwd(x: &[Limb], y: &[Limb]) -> Vec { + if x.len() < y.len() { + karatsuba_mul(x, y) + } else { + karatsuba_mul(y, x) + } + } + + /// MulAssign bigint to bigint. + #[inline] + pub fn imul(x: &mut Vec, y: &[Limb]) { + if y.len() == 1 { + small::imul(x, y[0]); + } else { + // We're not really in a condition where using Karatsuba + // multiplication makes sense, so we're just going to use long + // division. ~20% speedup compared to: + // *x = karatsuba_mul_fwd(x, y); + *x = karatsuba_mul_fwd(x, y); + } + } +} // large + +// TRAITS +// ------ + +/// Traits for shared operations for big integers. +/// +/// None of these are implemented using normal traits, since these +/// are very expensive operations, and we want to deliberately +/// and explicitly use these functions. 
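// Sketch of the Karatsuba identity that `karatsuba_mul` applies limb-wise,
// shown on plain u64 halves with base B = 2^32: for x = xl + xh*B and
// y = yl + yh*B only three sub-products are needed, because the middle term
// is (xl + xh)*(yl + yh) - z0 - z2.
fn karatsuba_u64(x: u64, y: u64) -> u128 {
    const B: u128 = 1 << 32;
    let (xl, xh) = ((x as u128) % B, (x as u128) / B);
    let (yl, yh) = ((y as u128) % B, (y as u128) / B);
    let z0 = xl * yl;
    let z2 = xh * yh;
    let z1 = (xl + xh) * (yl + yh) - z0 - z2;
    z0 + z1 * B + z2 * B * B
}

fn main() {
    let (x, y) = (0xDEAD_BEEF_CAFE_F00D_u64, 0x0123_4567_89AB_CDEF_u64);
    assert_eq!(karatsuba_u64(x, y), (x as u128) * (y as u128));
}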
+pub(crate) trait Math: Clone + Sized + Default { + // DATA + + /// Get access to the underlying data + fn data(&self) -> &Vec; + + /// Get access to the underlying data + fn data_mut(&mut self) -> &mut Vec; + + // RELATIVE OPERATIONS + + /// Compare self to y. + #[inline] + fn compare(&self, y: &Self) -> cmp::Ordering { + large::compare(self.data(), y.data()) + } + + // PROPERTIES + + /// Get the high 64-bits from the bigint and if there are remaining bits. + #[inline] + fn hi64(&self) -> (u64, bool) { + self.data().as_slice().hi64() + } + + /// Calculate the bit-length of the big-integer. + /// Returns usize::max_value() if the value overflows, + /// IE, if `self.data().len() > usize::max_value() / 8`. + #[inline] + fn bit_length(&self) -> usize { + small::bit_length(self.data()) + } + + // INTEGER CONVERSIONS + + /// Create new big integer from u64. + #[inline] + fn from_u64(x: u64) -> Self { + let mut v = Self::default(); + let slc = split_u64(x); + v.data_mut().extend_from_slice(&slc); + v.normalize(); + v + } + + // NORMALIZE + + /// Normalize the integer, so any leading zero values are removed. + #[inline] + fn normalize(&mut self) { + small::normalize(self.data_mut()); + } + + // ADDITION + + /// AddAssign small integer. + #[inline] + fn iadd_small(&mut self, y: Limb) { + small::iadd(self.data_mut(), y); + } + + // MULTIPLICATION + + /// MulAssign small integer. + #[inline] + fn imul_small(&mut self, y: Limb) { + small::imul(self.data_mut(), y); + } + + /// Multiply by a power of 2. + #[inline] + fn imul_pow2(&mut self, n: u32) { + self.ishl(n as usize) + } + + /// Multiply by a power of 5. + #[inline] + fn imul_pow5(&mut self, n: u32) { + small::imul_pow5(self.data_mut(), n) + } + + /// MulAssign by a power of 10. + #[inline] + fn imul_pow10(&mut self, n: u32) { + self.imul_pow5(n); + self.imul_pow2(n); + } + + // SHIFTS + + /// Shift-left the entire buffer n bits. + #[inline] + fn ishl(&mut self, n: usize) { + small::ishl(self.data_mut(), n); + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/mod.rs cargo-0.47.0/vendor/serde_json/src/lexical/mod.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,38 @@ +// The code in this module is derived from the `lexical` crate by @Alexhuszagh +// which the author condensed into this minimal subset for use in serde_json. +// For the serde_json use case we care more about reliably round tripping all +// possible floating point values than about parsing any arbitrarily long string +// of digits with perfect accuracy, as the latter would take a high cost in +// compile time and performance. +// +// Dual licensed as MIT and Apache 2.0 just like the rest of serde_json, but +// copyright Alexander Huszagh. + +//! Fast, minimal float-parsing algorithm. 
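// Sketch of the decomposition behind `imul_pow10` in the `Math` trait: since
// 10^n = 5^n * 2^n, multiplying by a power of ten is a power-of-five multiply
// followed by a plain left shift. Plain u128 arithmetic stands in for the
// big-integer operations here.
fn mul_pow10(x: u128, n: u32) -> u128 {
    let pow5 = 5u128.pow(n);
    (x * pow5) << n
}

fn main() {
    assert_eq!(mul_pow10(7, 6), 7 * 10u128.pow(6));
}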
+ +// MODULES +pub(crate) mod algorithm; +mod bhcomp; +mod bignum; +mod cached; +mod cached_float80; +mod digit; +mod errors; +pub(crate) mod exponent; +pub(crate) mod float; +mod large_powers; +pub(crate) mod math; +pub(crate) mod num; +pub(crate) mod parse; +pub(crate) mod rounding; +mod shift; +mod small_powers; + +#[cfg(limb_width_32)] +mod large_powers32; + +#[cfg(limb_width_64)] +mod large_powers64; + +// API +pub use self::parse::{parse_concise_float, parse_truncated_float}; diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/num.rs cargo-0.47.0/vendor/serde_json/src/lexical/num.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/num.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/num.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,440 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Utilities for Rust numbers. + +use crate::lib::ops; + +/// Precalculated values of radix**i for i in range [0, arr.len()-1]. +/// Each value can be **exactly** represented as that type. +const F32_POW10: [f32; 11] = [ + 1.0, + 10.0, + 100.0, + 1000.0, + 10000.0, + 100000.0, + 1000000.0, + 10000000.0, + 100000000.0, + 1000000000.0, + 10000000000.0, +]; + +/// Precalculated values of radix**i for i in range [0, arr.len()-1]. +/// Each value can be **exactly** represented as that type. +const F64_POW10: [f64; 23] = [ + 1.0, + 10.0, + 100.0, + 1000.0, + 10000.0, + 100000.0, + 1000000.0, + 10000000.0, + 100000000.0, + 1000000000.0, + 10000000000.0, + 100000000000.0, + 1000000000000.0, + 10000000000000.0, + 100000000000000.0, + 1000000000000000.0, + 10000000000000000.0, + 100000000000000000.0, + 1000000000000000000.0, + 10000000000000000000.0, + 100000000000000000000.0, + 1000000000000000000000.0, + 10000000000000000000000.0, +]; + +/// Type that can be converted to primitive with `as`. +pub trait AsPrimitive: Sized + Copy + PartialOrd { + fn as_u32(self) -> u32; + fn as_u64(self) -> u64; + fn as_u128(self) -> u128; + fn as_usize(self) -> usize; + fn as_f32(self) -> f32; + fn as_f64(self) -> f64; +} + +macro_rules! as_primitive_impl { + ($($ty:ident)*) => { + $( + impl AsPrimitive for $ty { + #[inline] + fn as_u32(self) -> u32 { + self as u32 + } + + #[inline] + fn as_u64(self) -> u64 { + self as u64 + } + + #[inline] + fn as_u128(self) -> u128 { + self as u128 + } + + #[inline] + fn as_usize(self) -> usize { + self as usize + } + + #[inline] + fn as_f32(self) -> f32 { + self as f32 + } + + #[inline] + fn as_f64(self) -> f64 { + self as f64 + } + } + )* + }; +} + +as_primitive_impl! { u32 u64 u128 usize f32 f64 } + +/// An interface for casting between machine scalars. +pub trait AsCast: AsPrimitive { + /// Creates a number from another value that can be converted into + /// a primitive via the `AsPrimitive` trait. + fn as_cast(n: N) -> Self; +} + +macro_rules! as_cast_impl { + ($ty:ident, $method:ident) => { + impl AsCast for $ty { + #[inline] + fn as_cast(n: N) -> Self { + n.$method() + } + } + }; +} + +as_cast_impl!(u32, as_u32); +as_cast_impl!(u64, as_u64); +as_cast_impl!(u128, as_u128); +as_cast_impl!(usize, as_usize); +as_cast_impl!(f32, as_f32); +as_cast_impl!(f64, as_f64); + +/// Numerical type trait. +pub trait Number: AsCast + ops::Add {} + +macro_rules! number_impl { + ($($ty:ident)*) => { + $( + impl Number for $ty {} + )* + }; +} + +number_impl! { u32 u64 u128 usize f32 f64 } + +/// Defines a trait that supports integral operations. +pub trait Integer: Number + ops::BitAnd + ops::Shr { + const ZERO: Self; +} + +macro_rules! 
integer_impl { + ($($ty:tt)*) => { + $( + impl Integer for $ty { + const ZERO: Self = 0; + } + )* + }; +} + +integer_impl! { u32 u64 u128 usize } + +/// Type trait for the mantissa type. +pub trait Mantissa: Integer { + /// Mask to extract the high bits from the integer. + const HIMASK: Self; + /// Mask to extract the low bits from the integer. + const LOMASK: Self; + /// Full size of the integer, in bits. + const FULL: i32; + /// Half size of the integer, in bits. + const HALF: i32 = Self::FULL / 2; +} + +impl Mantissa for u64 { + const HIMASK: u64 = 0xFFFFFFFF00000000; + const LOMASK: u64 = 0x00000000FFFFFFFF; + const FULL: i32 = 64; +} + +/// Get exact exponent limit for radix. +pub trait Float: Number { + /// Unsigned type of the same size. + type Unsigned: Integer; + + /// Literal zero. + const ZERO: Self; + /// Maximum number of digits that can contribute in the mantissa. + /// + /// We can exactly represent a float in radix `b` from radix 2 if + /// `b` is divisible by 2. This function calculates the exact number of + /// digits required to exactly represent that float. + /// + /// According to the "Handbook of Floating Point Arithmetic", + /// for IEEE754, with emin being the min exponent, p2 being the + /// precision, and b being the radix, the number of digits follows as: + /// + /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋` + /// + /// For f32, this follows as: + /// emin = -126 + /// p2 = 24 + /// + /// For f64, this follows as: + /// emin = -1022 + /// p2 = 53 + /// + /// In Python: + /// `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))` + /// + /// This was used to calculate the maximum number of digits for [2, 36]. + const MAX_DIGITS: usize; + + // MASKS + + /// Bitmask for the sign bit. + const SIGN_MASK: Self::Unsigned; + /// Bitmask for the exponent, including the hidden bit. + const EXPONENT_MASK: Self::Unsigned; + /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction. + const HIDDEN_BIT_MASK: Self::Unsigned; + /// Bitmask for the mantissa (fraction), excluding the hidden bit. + const MANTISSA_MASK: Self::Unsigned; + + // PROPERTIES + + /// Positive infinity as bits. + const INFINITY_BITS: Self::Unsigned; + /// Positive infinity as bits. + const NEGATIVE_INFINITY_BITS: Self::Unsigned; + /// Size of the significand (mantissa) without hidden bit. + const MANTISSA_SIZE: i32; + /// Bias of the exponet + const EXPONENT_BIAS: i32; + /// Exponent portion of a denormal float. + const DENORMAL_EXPONENT: i32; + /// Maximum exponent value in float. + const MAX_EXPONENT: i32; + + // ROUNDING + + /// Default number of bits to shift (or 64 - mantissa size - 1). + const DEFAULT_SHIFT: i32; + /// Mask to determine if a full-carry occurred (1 in bit above hidden bit). + const CARRY_MASK: u64; + + /// Get min and max exponent limits (exact) from radix. + fn exponent_limit() -> (i32, i32); + + /// Get the number of digits that can be shifted from exponent to mantissa. + fn mantissa_limit() -> i32; + + // Re-exported methods from std. + fn pow10(self, n: i32) -> Self; + fn from_bits(u: Self::Unsigned) -> Self; + fn to_bits(self) -> Self::Unsigned; + fn is_sign_positive(self) -> bool; + fn is_sign_negative(self) -> bool; + + /// Returns true if the float is a denormal. + #[inline] + fn is_denormal(self) -> bool { + self.to_bits() & Self::EXPONENT_MASK == Self::Unsigned::ZERO + } + + /// Returns true if the float is a NaN or Infinite. 
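// Sketch of the bit layout the `Float` trait's masks carve out of an f64:
// sign (1 bit), biased exponent (11 bits), fraction (52 bits), with the
// hidden bit added back for normal numbers. The helper name is hypothetical.
fn decompose(x: f64) -> (u64, u64, u64) {
    let bits = x.to_bits();
    let sign = bits >> 63;
    let biased_exp = (bits >> 52) & 0x7FF;
    let fraction = bits & 0x000F_FFFF_FFFF_FFFF;
    (sign, biased_exp, fraction)
}

fn main() {
    // 1.5 = (1 + 2^-1) * 2^0: biased exponent 1023, only the top fraction bit set.
    assert_eq!(decompose(1.5), (0, 1023, 1 << 51));
}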
+ #[inline] + fn is_special(self) -> bool { + self.to_bits() & Self::EXPONENT_MASK == Self::EXPONENT_MASK + } + + /// Returns true if the float is infinite. + #[inline] + fn is_inf(self) -> bool { + self.is_special() && (self.to_bits() & Self::MANTISSA_MASK) == Self::Unsigned::ZERO + } + + /// Get exponent component from the float. + #[inline] + fn exponent(self) -> i32 { + if self.is_denormal() { + return Self::DENORMAL_EXPONENT; + } + + let bits = self.to_bits(); + let biased_e = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE).as_u32(); + biased_e as i32 - Self::EXPONENT_BIAS + } + + /// Get mantissa (significand) component from float. + #[inline] + fn mantissa(self) -> Self::Unsigned { + let bits = self.to_bits(); + let s = bits & Self::MANTISSA_MASK; + if !self.is_denormal() { + s + Self::HIDDEN_BIT_MASK + } else { + s + } + } + + /// Get next greater float for a positive float. + /// Value must be >= 0.0 and < INFINITY. + #[inline] + fn next_positive(self) -> Self { + debug_assert!(self.is_sign_positive() && !self.is_inf()); + Self::from_bits(self.to_bits() + Self::Unsigned::as_cast(1u32)) + } + + /// Round a positive number to even. + #[inline] + fn round_positive_even(self) -> Self { + if self.mantissa() & Self::Unsigned::as_cast(1u32) == Self::Unsigned::as_cast(1u32) { + self.next_positive() + } else { + self + } + } +} + +impl Float for f32 { + type Unsigned = u32; + + const ZERO: f32 = 0.0; + const MAX_DIGITS: usize = 114; + const SIGN_MASK: u32 = 0x80000000; + const EXPONENT_MASK: u32 = 0x7F800000; + const HIDDEN_BIT_MASK: u32 = 0x00800000; + const MANTISSA_MASK: u32 = 0x007FFFFF; + const INFINITY_BITS: u32 = 0x7F800000; + const NEGATIVE_INFINITY_BITS: u32 = Self::INFINITY_BITS | Self::SIGN_MASK; + const MANTISSA_SIZE: i32 = 23; + const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE; + const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; + const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS; + const DEFAULT_SHIFT: i32 = u64::FULL - f32::MANTISSA_SIZE - 1; + const CARRY_MASK: u64 = 0x1000000; + + #[inline] + fn exponent_limit() -> (i32, i32) { + (-10, 10) + } + + #[inline] + fn mantissa_limit() -> i32 { + 7 + } + + #[inline] + fn pow10(self, n: i32) -> f32 { + // Check the exponent is within bounds in debug builds. 
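// Sketch of why `exponent_limit()` for f32 is (-10, 10), roughly: 10^n for
// n <= 10 is exactly representable as an f32 (its odd part 5^n still fits in
// the 24-bit mantissa), so scaling by a table entry is a single correctly
// rounded multiply or divide by an exact constant. Helper name hypothetical.
fn pow10_exact_f32(x: f32, n: i32) -> f32 {
    const TABLE: [f32; 11] = [
        1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10,
    ];
    assert!((-10..=10).contains(&n));
    if n >= 0 {
        x * TABLE[n as usize]
    } else {
        x / TABLE[(-n) as usize]
    }
}

fn main() {
    assert_eq!(pow10_exact_f32(3.0, 4), 30000.0);
    assert_eq!(pow10_exact_f32(2.5, -1), 0.25);
}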
+ debug_assert!({ + let (min, max) = Self::exponent_limit(); + n >= min && n <= max + }); + + if n > 0 { + self * F32_POW10[n as usize] + } else { + self / F32_POW10[-n as usize] + } + } + + #[inline] + fn from_bits(u: u32) -> f32 { + f32::from_bits(u) + } + + #[inline] + fn to_bits(self) -> u32 { + f32::to_bits(self) + } + + #[inline] + fn is_sign_positive(self) -> bool { + f32::is_sign_positive(self) + } + + #[inline] + fn is_sign_negative(self) -> bool { + f32::is_sign_negative(self) + } +} + +impl Float for f64 { + type Unsigned = u64; + + const ZERO: f64 = 0.0; + const MAX_DIGITS: usize = 769; + const SIGN_MASK: u64 = 0x8000000000000000; + const EXPONENT_MASK: u64 = 0x7FF0000000000000; + const HIDDEN_BIT_MASK: u64 = 0x0010000000000000; + const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF; + const INFINITY_BITS: u64 = 0x7FF0000000000000; + const NEGATIVE_INFINITY_BITS: u64 = Self::INFINITY_BITS | Self::SIGN_MASK; + const MANTISSA_SIZE: i32 = 52; + const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE; + const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; + const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS; + const DEFAULT_SHIFT: i32 = u64::FULL - f64::MANTISSA_SIZE - 1; + const CARRY_MASK: u64 = 0x20000000000000; + + #[inline] + fn exponent_limit() -> (i32, i32) { + (-22, 22) + } + + #[inline] + fn mantissa_limit() -> i32 { + 15 + } + + #[inline] + fn pow10(self, n: i32) -> f64 { + // Check the exponent is within bounds in debug builds. + debug_assert!({ + let (min, max) = Self::exponent_limit(); + n >= min && n <= max + }); + + if n > 0 { + self * F64_POW10[n as usize] + } else { + self / F64_POW10[-n as usize] + } + } + + #[inline] + fn from_bits(u: u64) -> f64 { + f64::from_bits(u) + } + + #[inline] + fn to_bits(self) -> u64 { + f64::to_bits(self) + } + + #[inline] + fn is_sign_positive(self) -> bool { + f64::is_sign_positive(self) + } + + #[inline] + fn is_sign_negative(self) -> bool { + f64::is_sign_negative(self) + } +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/parse.rs cargo-0.47.0/vendor/serde_json/src/lexical/parse.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/parse.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/parse.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,83 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +use super::algorithm::*; +use super::bhcomp::*; +use super::digit::*; +use super::exponent::*; +use super::num::*; + +// PARSERS +// ------- + +/// Parse float for which the entire integer and fraction parts fit into a 64 +/// bit mantissa. +pub fn parse_concise_float(mantissa: u64, mant_exp: i32) -> F +where + F: Float, +{ + if let Some(float) = fast_path(mantissa, mant_exp) { + return float; + } + + // Moderate path (use an extended 80-bit representation). + let truncated = false; + let (fp, valid) = moderate_path::(mantissa, mant_exp, truncated); + if valid { + return fp.into_float::(); + } + + let b = fp.into_downward_float::(); + if b.is_special() { + // We have a non-finite number, we get to leave early. + return b; + } + + // Slow path, fast path didn't work. + let mut buffer = itoa::Buffer::new(); + let integer = buffer.format(mantissa).as_bytes(); + let fraction = &[]; + bhcomp(b, integer, fraction, mant_exp) +} + +/// Parse float from extracted float components. +/// +/// * `integer` - Slice containing the integer digits. +/// * `fraction` - Slice containing the fraction digits. +/// * `exponent` - Parsed, 32-bit exponent. 
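// Sketch of the fast-path check that `parse_concise_float` tries first, under
// the assumption that the decimal mantissa is exactly representable (at most
// 2^53 for f64) and the power of ten lies within `exponent_limit`: one exact
// conversion plus one correctly rounded multiply or divide gives the right
// answer. `powi` is used only for brevity; the vendored code indexes the
// precomputed `F64_POW10` table of exact powers instead.
fn fast_path_f64(mantissa: u64, exp10: i32) -> Option<f64> {
    const MAX_MANTISSA: u64 = 1 << 53; // integers up to 2^53 are exact in f64
    if mantissa > MAX_MANTISSA || !(-22..=22).contains(&exp10) {
        return None;
    }
    let m = mantissa as f64;
    Some(if exp10 >= 0 {
        m * 10f64.powi(exp10)
    } else {
        m / 10f64.powi(-exp10)
    })
}

fn main() {
    assert_eq!(fast_path_f64(12345, -2), Some(123.45));
    assert_eq!(fast_path_f64(u64::MAX, 0), None);
}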
+/// +/// Precondition: The integer must not have leading zeros. +pub fn parse_truncated_float(integer: &[u8], mut fraction: &[u8], exponent: i32) -> F +where + F: Float, +{ + // Trim trailing zeroes from the fraction part. + while fraction.last() == Some(&b'0') { + fraction = &fraction[..fraction.len() - 1]; + } + + // Calculate the number of truncated digits. + let mut truncated = 0; + let mut mantissa: u64 = 0; + let mut iter = integer.iter().chain(fraction); + for &c in &mut iter { + mantissa = match add_digit(mantissa, to_digit(c).unwrap()) { + Some(v) => v, + None => { + truncated = 1 + iter.count(); + break; + } + }; + } + + let mant_exp = mantissa_exponent(exponent, fraction.len(), truncated); + let is_truncated = true; + + fallback_path( + integer, + fraction, + mantissa, + exponent, + mant_exp, + is_truncated, + ) +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/rounding.rs cargo-0.47.0/vendor/serde_json/src/lexical/rounding.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/rounding.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/rounding.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,231 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Defines rounding schemes for floating-point numbers. + +use super::float::ExtendedFloat; +use super::num::*; +use super::shift::*; +use crate::lib::mem; + +// MASKS + +/// Calculate a scalar factor of 2 above the halfway point. +#[inline] +pub(crate) fn nth_bit(n: u64) -> u64 { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!(n < bits, "nth_bit() overflow in shl."); + + 1 << n +} + +/// Generate a bitwise mask for the lower `n` bits. +#[inline] +pub(crate) fn lower_n_mask(n: u64) -> u64 { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!(n <= bits, "lower_n_mask() overflow in shl."); + + if n == bits { + u64::max_value() + } else { + (1 << n) - 1 + } +} + +/// Calculate the halfway point for the lower `n` bits. +#[inline] +pub(crate) fn lower_n_halfway(n: u64) -> u64 { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!(n <= bits, "lower_n_halfway() overflow in shl."); + + if n == 0 { + 0 + } else { + nth_bit(n - 1) + } +} + +/// Calculate a bitwise mask with `n` 1 bits starting at the `bit` position. +#[inline] +pub(crate) fn internal_n_mask(bit: u64, n: u64) -> u64 { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!(bit <= bits, "internal_n_halfway() overflow in shl."); + debug_assert!(n <= bits, "internal_n_halfway() overflow in shl."); + debug_assert!(bit >= n, "internal_n_halfway() overflow in sub."); + + lower_n_mask(bit) ^ lower_n_mask(bit - n) +} + +// NEAREST ROUNDING + +// Shift right N-bytes and round to the nearest. +// +// Return if we are above halfway and if we are halfway. +#[inline] +pub(crate) fn round_nearest(fp: &mut ExtendedFloat, shift: i32) -> (bool, bool) { + // Extract the truncated bits using mask. + // Calculate if the value of the truncated bits are either above + // the mid-way point, or equal to it. + // + // For example, for 4 truncated bytes, the mask would be b1111 + // and the midway point would be b1000. + let mask: u64 = lower_n_mask(shift as u64); + let halfway: u64 = lower_n_halfway(shift as u64); + + let truncated_bits = fp.mant & mask; + let is_above = truncated_bits > halfway; + let is_halfway = truncated_bits == halfway; + + // Bit shift so the leading bit is in the hidden bit. + overflowing_shr(fp, shift); + + (is_above, is_halfway) +} + +// Tie rounded floating point to event. 
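// Sketch of round-nearest, ties-to-even on the bits that a right shift would
// discard, mirroring `round_nearest` followed by `tie_even` for a 4-bit
// shift: mask 0b1111 selects the truncated bits, halfway is 0b1000, and we
// round up when above halfway, or exactly at halfway with an odd result.
fn shr_round_nearest_even(mant: u64, shift: u32) -> u64 {
    assert!(shift >= 1 && shift < 64);
    let mask = (1u64 << shift) - 1;
    let halfway = 1u64 << (shift - 1);
    let truncated = mant & mask;
    let mut out = mant >> shift;
    let is_above = truncated > halfway;
    let is_halfway = truncated == halfway;
    let is_odd = out & 1 == 1;
    if is_above || (is_odd && is_halfway) {
        out += 1;
    }
    out
}

fn main() {
    assert_eq!(shr_round_nearest_even(0b1010_1001, 4), 0b1011); // above halfway
    assert_eq!(shr_round_nearest_even(0b1010_1000, 4), 0b1010); // halfway, even stays
    assert_eq!(shr_round_nearest_even(0b1011_1000, 4), 0b1100); // halfway, odd rounds up
}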
+#[inline] +pub(crate) fn tie_even(fp: &mut ExtendedFloat, is_above: bool, is_halfway: bool) { + // Extract the last bit after shifting (and determine if it is odd). + let is_odd = fp.mant & 1 == 1; + + // Calculate if we need to roundup. + // We need to roundup if we are above halfway, or if we are odd + // and at half-way (need to tie-to-even). + if is_above || (is_odd && is_halfway) { + fp.mant += 1; + } +} + +// Shift right N-bytes and round nearest, tie-to-even. +// +// Floating-point arithmetic uses round to nearest, ties to even, +// which rounds to the nearest value, if the value is halfway in between, +// round to an even value. +#[inline] +pub(crate) fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32) { + let (is_above, is_halfway) = round_nearest(fp, shift); + tie_even(fp, is_above, is_halfway); +} + +// DIRECTED ROUNDING + +// Shift right N-bytes and round towards a direction. +// +// Return if we have any truncated bytes. +#[inline] +fn round_toward(fp: &mut ExtendedFloat, shift: i32) -> bool { + let mask: u64 = lower_n_mask(shift as u64); + let truncated_bits = fp.mant & mask; + + // Bit shift so the leading bit is in the hidden bit. + overflowing_shr(fp, shift); + + truncated_bits != 0 +} + +// Round down. +#[inline] +fn downard(_: &mut ExtendedFloat, _: bool) {} + +// Shift right N-bytes and round toward zero. +// +// Floating-point arithmetic defines round toward zero, which rounds +// towards positive zero. +#[inline] +pub(crate) fn round_downward(fp: &mut ExtendedFloat, shift: i32) { + // Bit shift so the leading bit is in the hidden bit. + // No rounding schemes, so we just ignore everything else. + let is_truncated = round_toward(fp, shift); + downard(fp, is_truncated); +} + +// ROUND TO FLOAT + +// Shift the ExtendedFloat fraction to the fraction bits in a native float. +// +// Floating-point arithmetic uses round to nearest, ties to even, +// which rounds to the nearest value, if the value is halfway in between, +// round to an even value. +#[inline] +pub(crate) fn round_to_float(fp: &mut ExtendedFloat, algorithm: Algorithm) +where + F: Float, + Algorithm: FnOnce(&mut ExtendedFloat, i32), +{ + // Calculate the difference to allow a single calculation + // rather than a loop, to minimize the number of ops required. + // This does underflow detection. + let final_exp = fp.exp + F::DEFAULT_SHIFT; + if final_exp < F::DENORMAL_EXPONENT { + // We would end up with a denormal exponent, try to round to more + // digits. Only shift right if we can avoid zeroing out the value, + // which requires the exponent diff to be < M::BITS. The value + // is already normalized, so we shouldn't have any issue zeroing + // out the value. + let diff = F::DENORMAL_EXPONENT - fp.exp; + if diff <= u64::FULL { + // We can avoid underflow, can get a valid representation. + algorithm(fp, diff); + } else { + // Certain underflow, assign literal 0s. + fp.mant = 0; + fp.exp = 0; + } + } else { + algorithm(fp, F::DEFAULT_SHIFT); + } + + if fp.mant & F::CARRY_MASK == F::CARRY_MASK { + // Roundup carried over to 1 past the hidden bit. + shr(fp, 1); + } +} + +// AVOID OVERFLOW/UNDERFLOW + +// Avoid overflow for large values, shift left as needed. +// +// Shift until a 1-bit is in the hidden bit, if the mantissa is not 0. +#[inline] +pub(crate) fn avoid_overflow(fp: &mut ExtendedFloat) +where + F: Float, +{ + // Calculate the difference to allow a single calculation + // rather than a loop, minimizing the number of ops required. 
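// Sketch of the mask construction that `avoid_overflow` relies on, mirroring
// `lower_n_mask` and `internal_n_mask` above: a window of `n` one-bits whose
// highest bit sits at position `bit - 1`, built as the XOR of two lower-bit
// masks. For example bit = 6, n = 2 gives 0b110000.
fn lower_n_mask(n: u64) -> u64 {
    if n == 64 { u64::MAX } else { (1u64 << n) - 1 }
}

fn internal_n_mask(bit: u64, n: u64) -> u64 {
    debug_assert!(bit >= n);
    lower_n_mask(bit) ^ lower_n_mask(bit - n)
}

fn main() {
    assert_eq!(internal_n_mask(6, 2), 0b11_0000);
    assert_eq!(internal_n_mask(4, 4), 0b1111);
}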
+ if fp.exp >= F::MAX_EXPONENT { + let diff = fp.exp - F::MAX_EXPONENT; + if diff <= F::MANTISSA_SIZE { + // Our overflow mask needs to start at the hidden bit, or at + // `F::MANTISSA_SIZE+1`, and needs to have `diff+1` bits set, + // to see if our value overflows. + let bit = (F::MANTISSA_SIZE + 1) as u64; + let n = (diff + 1) as u64; + let mask = internal_n_mask(bit, n); + if (fp.mant & mask) == 0 { + // If we have no 1-bit in the hidden-bit position, + // which is index 0, we need to shift 1. + let shift = diff + 1; + shl(fp, shift); + } + } + } +} + +// ROUND TO NATIVE + +// Round an extended-precision float to a native float representation. +#[inline] +pub(crate) fn round_to_native(fp: &mut ExtendedFloat, algorithm: Algorithm) +where + F: Float, + Algorithm: FnOnce(&mut ExtendedFloat, i32), +{ + // Shift all the way left, to ensure a consistent representation. + // The following right-shifts do not work for a non-normalized number. + fp.normalize(); + + // Round so the fraction is in a native mantissa representation, + // and avoid overflow/underflow. + round_to_float::(fp, algorithm); + avoid_overflow::(fp); +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/shift.rs cargo-0.47.0/vendor/serde_json/src/lexical/shift.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/shift.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/shift.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,46 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Bit-shift helpers. + +use super::float::ExtendedFloat; +use crate::lib::mem; + +// Shift extended-precision float right `shift` bytes. +#[inline] +pub(crate) fn shr(fp: &mut ExtendedFloat, shift: i32) { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!((shift as u64) < bits, "shr() overflow in shift right."); + + fp.mant >>= shift; + fp.exp += shift; +} + +// Shift extended-precision float right `shift` bytes. +// +// Accepts when the shift is the same as the type size, and +// sets the value to 0. +#[inline] +pub(crate) fn overflowing_shr(fp: &mut ExtendedFloat, shift: i32) { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!( + (shift as u64) <= bits, + "overflowing_shr() overflow in shift right." + ); + + fp.mant = if shift as u64 == bits { + 0 + } else { + fp.mant >> shift + }; + fp.exp += shift; +} + +// Shift extended-precision float left `shift` bytes. +#[inline] +pub(crate) fn shl(fp: &mut ExtendedFloat, shift: i32) { + let bits: u64 = mem::size_of::() as u64 * 8; + debug_assert!((shift as u64) < bits, "shl() overflow in shift left."); + + fp.mant <<= shift; + fp.exp -= shift; +} diff -Nru cargo-0.44.1/vendor/serde_json/src/lexical/small_powers.rs cargo-0.47.0/vendor/serde_json/src/lexical/small_powers.rs --- cargo-0.44.1/vendor/serde_json/src/lexical/small_powers.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lexical/small_powers.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,70 @@ +// Adapted from https://github.com/Alexhuszagh/rust-lexical. + +//! Pre-computed small powers. 
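// Sketch of the invariant behind the `shl`/`shr` helpers: the extended float
// represents mant * 2^exp, so shifting the mantissa one way and adjusting the
// exponent the other way leaves the represented value unchanged. `Fp` is a
// simplified stand-in for `ExtendedFloat`.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Fp {
    mant: u64,
    exp: i32,
}

fn shl(fp: &mut Fp, shift: i32) {
    fp.mant <<= shift;
    fp.exp -= shift;
}

fn value(fp: Fp) -> f64 {
    fp.mant as f64 * 2f64.powi(fp.exp)
}

fn main() {
    let mut fp = Fp { mant: 3, exp: 4 }; // 3 * 2^4 = 48
    let before = value(fp);
    shl(&mut fp, 5);
    assert_eq!(fp, Fp { mant: 96, exp: -1 });
    assert_eq!(value(fp), before);
}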
+ +// 32 BIT +#[cfg(limb_width_32)] +pub(crate) const POW5_32: [u32; 14] = [ + 1, 5, 25, 125, 625, 3125, 15625, 78125, 390625, 1953125, 9765625, 48828125, 244140625, + 1220703125, +]; + +#[cfg(limb_width_32)] +pub(crate) const POW10_32: [u32; 10] = [ + 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, +]; + +// 64 BIT +#[cfg(limb_width_64)] +pub(crate) const POW5_64: [u64; 28] = [ + 1, + 5, + 25, + 125, + 625, + 3125, + 15625, + 78125, + 390625, + 1953125, + 9765625, + 48828125, + 244140625, + 1220703125, + 6103515625, + 30517578125, + 152587890625, + 762939453125, + 3814697265625, + 19073486328125, + 95367431640625, + 476837158203125, + 2384185791015625, + 11920928955078125, + 59604644775390625, + 298023223876953125, + 1490116119384765625, + 7450580596923828125, +]; +pub(crate) const POW10_64: [u64; 20] = [ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + 10000000000, + 100000000000, + 1000000000000, + 10000000000000, + 100000000000000, + 1000000000000000, + 10000000000000000, + 100000000000000000, + 1000000000000000000, + 10000000000000000000, +]; diff -Nru cargo-0.44.1/vendor/serde_json/src/lib.rs cargo-0.47.0/vendor/serde_json/src/lib.rs --- cargo-0.44.1/vendor/serde_json/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -300,27 +300,39 @@ //! [macro]: https://docs.serde.rs/serde_json/macro.json.html //! [`serde-json-core`]: https://japaric.github.io/serde-json-core/serde_json_core/ -#![doc(html_root_url = "https://docs.rs/serde_json/1.0.53")] +#![doc(html_root_url = "https://docs.rs/serde_json/1.0.58")] #![deny(clippy::all, clippy::pedantic)] // Ignored clippy lints #![allow( + clippy::comparison_chain, clippy::deprecated_cfg_attr, clippy::doc_markdown, + clippy::excessive_precision, + clippy::float_cmp, + clippy::match_like_matches_macro, clippy::match_single_binding, clippy::needless_doctest_main, - clippy::transmute_ptr_to_ptr + clippy::transmute_ptr_to_ptr, + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 + clippy::unnested_or_patterns, )] // Ignored clippy_pedantic lints #![allow( // Deserializer::from_str, into_iter clippy::should_implement_trait, // integer and float ser/de requires these sorts of casts + clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_sign_loss, // correctly used clippy::enum_glob_use, + clippy::if_not_else, clippy::integer_division, + clippy::map_err_ignore, + clippy::match_same_arms, + clippy::similar_names, + clippy::unused_self, clippy::wildcard_imports, // things are often more readable this way clippy::cast_lossless, @@ -328,11 +340,13 @@ clippy::shadow_unrelated, clippy::single_match_else, clippy::too_many_lines, + clippy::unreadable_literal, + clippy::unseparated_literal_suffix, clippy::use_self, clippy::zero_prefixed_literal, // we support older compilers clippy::checked_conversions, - clippy::redundant_field_names, + clippy::mem_replace_with_default, // noisy clippy::missing_errors_doc, clippy::must_use_candidate, @@ -357,6 +371,7 @@ pub use std::*; } + pub use self::core::ops::{Bound, RangeBounds}; pub use self::core::cell::{Cell, RefCell}; pub use self::core::clone::{self, Clone}; pub use self::core::convert::{self, From, Into}; @@ -423,6 +438,9 @@ crate::lib::Result::Err(err) => return crate::lib::Result::Err(err), } }; + ($e:expr,) => { + tri!($e) + }; } #[macro_use] @@ -442,6 +460,8 @@ mod io; #[cfg(feature = "std")] 
mod iter; +#[cfg(feature = "float_roundtrip")] +mod lexical; mod number; mod read; diff -Nru cargo-0.44.1/vendor/serde_json/src/macros.rs cargo-0.47.0/vendor/serde_json/src/macros.rs --- cargo-0.44.1/vendor/serde_json/src/macros.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/macros.rs 2020-10-01 21:38:28.000000000 +0000 @@ -224,6 +224,11 @@ json_internal!(@object $object ($key) (: $($rest)*) (: $($rest)*)); }; + // Refuse to absorb colon token into key expression. + (@object $object:ident ($($key:tt)*) (: $($unexpected:tt)+) $copy:tt) => { + json_expect_expr_comma!($($unexpected)+); + }; + // Munch a token into the current key. (@object $object:ident ($($key:tt)*) ($tt:tt $($rest:tt)*) $copy:tt) => { json_internal!(@object $object ($($key)* $tt) ($($rest)*) ($($rest)*)); @@ -290,3 +295,9 @@ macro_rules! json_unexpected { () => {}; } + +#[macro_export] +#[doc(hidden)] +macro_rules! json_expect_expr_comma { + ($e:expr , $($tt:tt)*) => {}; +} diff -Nru cargo-0.44.1/vendor/serde_json/src/map.rs cargo-0.47.0/vendor/serde_json/src/map.rs --- cargo-0.44.1/vendor/serde_json/src/map.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/map.rs 2020-10-01 21:38:28.000000000 +0000 @@ -34,22 +34,17 @@ } } - #[cfg(not(feature = "preserve_order"))] - /// Makes a new empty Map with the given initial capacity. - #[inline] - pub fn with_capacity(capacity: usize) -> Self { - // does not support with_capacity - let _ = capacity; - Map { - map: BTreeMap::new(), - } - } - - #[cfg(feature = "preserve_order")] /// Makes a new empty Map with the given initial capacity. #[inline] pub fn with_capacity(capacity: usize) -> Self { Map { + #[cfg(not(feature = "preserve_order"))] + map: { + // does not support with_capacity + let _ = capacity; + BTreeMap::new() + }, + #[cfg(feature = "preserve_order")] map: IndexMap::with_capacity(capacity), } } @@ -127,11 +122,59 @@ return self.map.remove(key); } + /// Removes a key from the map, returning the stored key and value if the + /// key was previously in the map. + /// + /// The key may be any borrowed form of the map's key type, but the ordering + /// on the borrowed form *must* match the ordering on the key type. + pub fn remove_entry(&mut self, key: &Q) -> Option<(String, Value)> + where + String: Borrow, + Q: ?Sized + Ord + Eq + Hash, + { + #[cfg(any(feature = "preserve_order", not(no_btreemap_remove_entry)))] + return self.map.remove_entry(key); + #[cfg(all( + not(feature = "preserve_order"), + no_btreemap_remove_entry, + not(no_btreemap_get_key_value), + ))] + { + let (key, _value) = self.map.get_key_value(key)?; + let key = key.clone(); + let value = self.map.remove::(&key)?; + Some((key, value)) + } + #[cfg(all( + not(feature = "preserve_order"), + no_btreemap_remove_entry, + no_btreemap_get_key_value, + ))] + { + struct Key<'a, Q: ?Sized>(&'a Q); + + impl<'a, Q: ?Sized> RangeBounds for Key<'a, Q> { + fn start_bound(&self) -> Bound<&Q> { + Bound::Included(self.0) + } + fn end_bound(&self) -> Bound<&Q> { + Bound::Included(self.0) + } + } + + let mut range = self.map.range(Key(key)); + let (key, _value) = range.next()?; + let key = key.clone(); + let value = self.map.remove::(&key)?; + Some((key, value)) + } + } + /// Moves all elements from other into Self, leaving other empty. 
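// Sketch of the `remove_entry` fallback added to map.rs for very old
// compilers: lacking `BTreeMap::remove_entry` and `get_key_value`, a
// single-key inclusive range is used to borrow the stored key, clone it, and
// then remove by that clone. A concrete `BTreeMap<String, i32>` is used here
// for brevity instead of the map's generic key type.
use std::collections::BTreeMap;
use std::ops::{Bound, RangeBounds};

struct Key<'a>(&'a str);

impl<'a> RangeBounds<str> for Key<'a> {
    fn start_bound(&self) -> Bound<&str> {
        Bound::Included(self.0)
    }
    fn end_bound(&self) -> Bound<&str> {
        Bound::Included(self.0)
    }
}

fn remove_entry(map: &mut BTreeMap<String, i32>, key: &str) -> Option<(String, i32)> {
    let (stored_key, _) = map.range(Key(key)).next()?;
    let stored_key = stored_key.clone();
    let value = map.remove(&stored_key)?;
    Some((stored_key, value))
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert("a".to_owned(), 1);
    assert_eq!(remove_entry(&mut map, "a"), Some(("a".to_owned(), 1)));
    assert!(map.is_empty());
}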
#[inline] pub fn append(&mut self, other: &mut Self) { #[cfg(feature = "preserve_order")] - for (k, v) in std::mem::replace(&mut other.map, MapImpl::default()).into_iter() { + for (k, v) in mem::replace(&mut other.map, MapImpl::default()) { self.map.insert(k, v); } #[cfg(not(feature = "preserve_order"))] @@ -150,8 +193,8 @@ use indexmap::map::Entry as EntryImpl; match self.map.entry(key.into()) { - EntryImpl::Vacant(vacant) => Entry::Vacant(VacantEntry { vacant: vacant }), - EntryImpl::Occupied(occupied) => Entry::Occupied(OccupiedEntry { occupied: occupied }), + EntryImpl::Vacant(vacant) => Entry::Vacant(VacantEntry { vacant }), + EntryImpl::Occupied(occupied) => Entry::Occupied(OccupiedEntry { occupied }), } } diff -Nru cargo-0.44.1/vendor/serde_json/src/number.rs cargo-0.47.0/vendor/serde_json/src/number.rs --- cargo-0.44.1/vendor/serde_json/src/number.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/number.rs 2020-10-01 21:38:28.000000000 +0000 @@ -239,7 +239,7 @@ ryu::Buffer::new().format_finite(f).to_owned() } }; - Some(Number { n: n }) + Some(Number { n }) } else { None } @@ -250,7 +250,7 @@ #[doc(hidden)] #[inline] pub fn from_string_unchecked(n: String) -> Self { - Number { n: n } + Number { n } } } @@ -655,7 +655,7 @@ #[cfg(feature = "arbitrary_precision")] ParserNumber::String(s) => s, }; - Number { n: n } + Number { n } } } @@ -675,7 +675,7 @@ itoa::Buffer::new().format(u).to_owned() } }; - Number { n: n } + Number { n } } } )* @@ -704,7 +704,7 @@ itoa::Buffer::new().format(i).to_owned() } }; - Number { n: n } + Number { n } } } )* diff -Nru cargo-0.44.1/vendor/serde_json/src/read.rs cargo-0.47.0/vendor/serde_json/src/read.rs --- cargo-0.44.1/vendor/serde_json/src/read.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/read.rs 2020-10-01 21:38:28.000000000 +0000 @@ -186,20 +186,11 @@ { /// Create a JSON input source to read from a std::io input stream. pub fn new(reader: R) -> Self { - #[cfg(not(feature = "raw_value"))] - { - IoRead { - iter: LineColIterator::new(reader.bytes()), - ch: None, - } - } - #[cfg(feature = "raw_value")] - { - IoRead { - iter: LineColIterator::new(reader.bytes()), - ch: None, - raw_buffer: None, - } + IoRead { + iter: LineColIterator::new(reader.bytes()), + ch: None, + #[cfg(feature = "raw_value")] + raw_buffer: None, } } } @@ -406,20 +397,11 @@ impl<'a> SliceRead<'a> { /// Create a JSON input source to read from a slice of bytes. pub fn new(slice: &'a [u8]) -> Self { - #[cfg(not(feature = "raw_value"))] - { - SliceRead { - slice: slice, - index: 0, - } - } - #[cfg(feature = "raw_value")] - { - SliceRead { - slice: slice, - index: 0, - raw_buffering_start_index: 0, - } + SliceRead { + slice, + index: 0, + #[cfg(feature = "raw_value")] + raw_buffering_start_index: 0, } } @@ -625,18 +607,10 @@ impl<'a> StrRead<'a> { /// Create a JSON input source to read from a UTF-8 string. 
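// Sketch of the refactor applied to `IoRead::new`, `SliceRead::new`, and
// `StrRead::new`: instead of duplicating the whole constructor under
// `#[cfg(...)]`, a single struct literal is kept and only the optional field
// carries the attribute. The struct here is a simplified stand-in with just
// the fields needed to show the pattern; `raw_value` is the feature name used
// in the patch.
struct SliceRead<'a> {
    slice: &'a [u8],
    index: usize,
    #[cfg(feature = "raw_value")]
    raw_buffering_start_index: usize,
}

impl<'a> SliceRead<'a> {
    fn new(slice: &'a [u8]) -> Self {
        SliceRead {
            slice,
            index: 0,
            #[cfg(feature = "raw_value")]
            raw_buffering_start_index: 0,
        }
    }
}

fn main() {
    let read = SliceRead::new(b"[1,2,3]");
    assert_eq!(read.index, 0);
    assert_eq!(read.slice.len(), 7);
}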
pub fn new(s: &'a str) -> Self { - #[cfg(not(feature = "raw_value"))] - { - StrRead { - delegate: SliceRead::new(s.as_bytes()), - } - } - #[cfg(feature = "raw_value")] - { - StrRead { - delegate: SliceRead::new(s.as_bytes()), - data: s, - } + StrRead { + delegate: SliceRead::new(s.as_bytes()), + #[cfg(feature = "raw_value")] + data: s, } } } @@ -719,6 +693,77 @@ } } +////////////////////////////////////////////////////////////////////////////// + +impl<'a, 'de, R> private::Sealed for &'a mut R where R: Read<'de> {} + +impl<'a, 'de, R> Read<'de> for &'a mut R +where + R: Read<'de>, +{ + fn next(&mut self) -> Result> { + R::next(self) + } + + fn peek(&mut self) -> Result> { + R::peek(self) + } + + fn discard(&mut self) { + R::discard(self) + } + + fn position(&self) -> Position { + R::position(self) + } + + fn peek_position(&self) -> Position { + R::peek_position(self) + } + + fn byte_offset(&self) -> usize { + R::byte_offset(self) + } + + fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec) -> Result> { + R::parse_str(self, scratch) + } + + fn parse_str_raw<'s>( + &'s mut self, + scratch: &'s mut Vec, + ) -> Result> { + R::parse_str_raw(self, scratch) + } + + fn ignore_str(&mut self) -> Result<()> { + R::ignore_str(self) + } + + fn decode_hex_escape(&mut self) -> Result { + R::decode_hex_escape(self) + } + + #[cfg(feature = "raw_value")] + fn begin_raw_buffering(&mut self) { + R::begin_raw_buffering(self) + } + + #[cfg(feature = "raw_value")] + fn end_raw_buffering(&mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + R::end_raw_buffering(self, visitor) + } + + const should_early_return_if_failed: bool = R::should_early_return_if_failed; + + fn set_failed(&mut self, failed: &mut bool) { + R::set_failed(self, failed) + } +} + ////////////////////////////////////////////////////////////////////////////// /// Marker for whether StreamDeserializer can implement FusedIterator. diff -Nru cargo-0.44.1/vendor/serde_json/src/ser.rs cargo-0.47.0/vendor/serde_json/src/ser.rs --- cargo-0.44.1/vendor/serde_json/src/ser.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/ser.rs 2020-10-01 21:38:28.000000000 +0000 @@ -44,10 +44,7 @@ /// specified. #[inline] pub fn with_formatter(writer: W, formatter: F) -> Self { - Serializer { - writer: writer, - formatter: formatter, - } + Serializer { writer, formatter } } /// Unwrap the `Writer` from the `Serializer`. @@ -1950,7 +1947,7 @@ PrettyFormatter { current_indent: 0, has_value: false, - indent: indent, + indent, } } } diff -Nru cargo-0.44.1/vendor/serde_json/src/value/de.rs cargo-0.47.0/vendor/serde_json/src/value/de.rs --- cargo-0.44.1/vendor/serde_json/src/value/de.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/serde_json/src/value/de.rs 2020-10-01 21:38:28.000000000 +0000 @@ -137,7 +137,7 @@ } } -macro_rules! deserialize_prim_number { +macro_rules! 
deserialize_number { ($method:ident) => { #[cfg(not(feature = "arbitrary_precision"))] fn $method(self, visitor: V) -> Result @@ -218,20 +218,20 @@ } } - deserialize_prim_number!(deserialize_i8); - deserialize_prim_number!(deserialize_i16); - deserialize_prim_number!(deserialize_i32); - deserialize_prim_number!(deserialize_i64); - deserialize_prim_number!(deserialize_u8); - deserialize_prim_number!(deserialize_u16); - deserialize_prim_number!(deserialize_u32); - deserialize_prim_number!(deserialize_u64); - deserialize_prim_number!(deserialize_f32); - deserialize_prim_number!(deserialize_f64); + deserialize_number!(deserialize_i8); + deserialize_number!(deserialize_i16); + deserialize_number!(deserialize_i32); + deserialize_number!(deserialize_i64); + deserialize_number!(deserialize_u8); + deserialize_number!(deserialize_u16); + deserialize_number!(deserialize_u32); + deserialize_number!(deserialize_u64); + deserialize_number!(deserialize_f32); + deserialize_number!(deserialize_f64); serde_if_integer128! { - deserialize_prim_number!(deserialize_i128); - deserialize_prim_number!(deserialize_u128); + deserialize_number!(deserialize_i128); + deserialize_number!(deserialize_u128); } #[inline] @@ -285,10 +285,7 @@ } }; - visitor.visit_enum(EnumDeserializer { - variant: variant, - value: value, - }) + visitor.visit_enum(EnumDeserializer { variant, value }) } #[inline] @@ -544,7 +541,7 @@ other.unexpected(), &"struct variant", )), - _ => Err(serde::de::Error::invalid_type( + None => Err(serde::de::Error::invalid_type( Unexpected::UnitVariant, &"struct variant", )), @@ -777,8 +774,8 @@ deserialize_value_ref_number!(deserialize_f64); serde_if_integer128! { - deserialize_prim_number!(deserialize_i128); - deserialize_prim_number!(deserialize_u128); + deserialize_number!(deserialize_i128); + deserialize_number!(deserialize_u128); } fn deserialize_option(self, visitor: V) -> Result @@ -830,10 +827,7 @@ } }; - visitor.visit_enum(EnumRefDeserializer { - variant: variant, - value: value, - }) + visitor.visit_enum(EnumRefDeserializer { variant, value }) } #[inline] @@ -1078,7 +1072,7 @@ other.unexpected(), &"struct variant", )), - _ => Err(serde::de::Error::invalid_type( + None => Err(serde::de::Error::invalid_type( Unexpected::UnitVariant, &"struct variant", )), @@ -1387,7 +1381,7 @@ impl<'de> BorrowedCowStrDeserializer<'de> { fn new(value: Cow<'de, str>) -> Self { - BorrowedCowStrDeserializer { value: value } + BorrowedCowStrDeserializer { value } } } diff -Nru cargo-0.44.1/vendor/shell-escape/.cargo-checksum.json cargo-0.47.0/vendor/shell-escape/.cargo-checksum.json --- cargo-0.44.1/vendor/shell-escape/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/shell-escape/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"170a13e64f2a51b77a45702ba77287f5c6829375b04a69cf2222acd17d0cfab9"} \ No newline at end of file +{"files":{},"package":"45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/shell-escape/Cargo.toml cargo-0.47.0/vendor/shell-escape/Cargo.toml --- cargo-0.44.1/vendor/shell-escape/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/shell-escape/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "shell-escape" -version = "0.1.4" +version = "0.1.5" authors = ["Steven Fackler "] description = "Escape characters that may have a special meaning in a shell" license = "MIT/Apache-2.0" diff -Nru cargo-0.44.1/vendor/shell-escape/src/lib.rs cargo-0.47.0/vendor/shell-escape/src/lib.rs --- cargo-0.44.1/vendor/shell-escape/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/shell-escape/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -33,7 +33,7 @@ /// /// [msdn]: http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx pub fn escape(s: Cow) -> Cow { - let mut needs_escape = false; + let mut needs_escape = s.is_empty(); for ch in s.chars() { match ch { '"' | '\t' | '\n' | ' ' => needs_escape = true, @@ -82,6 +82,7 @@ r#""--features=\"default\"""#); assert_eq!(escape(r#"\path\to\my documents\"#.into()), r#""\path\to\my documents\\""#); + assert_eq!(escape("".into()), r#""""#); } } @@ -98,7 +99,7 @@ /// Escape characters that may have special meaning in a shell, including spaces. pub fn escape(s: Cow) -> Cow { - if !s.contains(non_whitelisted) { + if !s.is_empty() && !s.contains(non_whitelisted) { return s; } @@ -128,6 +129,7 @@ assert_eq!(escape("linker=gcc -L/foo -Wl,bar".into()), r#"'linker=gcc -L/foo -Wl,bar'"#); assert_eq!(escape(r#"--features="default""#.into()), r#"'--features="default"'"#); assert_eq!(escape(r#"'!\$`\\\n "#.into()), r#"''\'''\!'\$`\\\n '"#); + assert_eq!(escape("".into()), r#"''"#); } } diff -Nru cargo-0.44.1/vendor/sized-chunks/.cargo-checksum.json cargo-0.47.0/vendor/sized-chunks/.cargo-checksum.json --- cargo-0.44.1/vendor/sized-chunks/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"d59044ea371ad781ff976f7b06480b9f0180e834eda94114f2afb4afc12b7718"} \ No newline at end of file +{"files":{},"package":"1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/sized-chunks/Cargo.toml cargo-0.47.0/vendor/sized-chunks/Cargo.toml --- cargo-0.44.1/vendor/sized-chunks/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "sized-chunks" -version = "0.5.3" +version = "0.6.2" authors = ["Bodil Stokke "] exclude = ["release.toml", "proptest-regressions/**"] description = "Efficient sized chunk datatypes" @@ -29,24 +29,27 @@ [[bench]] name = "sized_chunk" harness = false -[dependencies.arbitrary] -version = "0.4" -optional = true +#[dependencies.arbitrary] +#version = "0.4" +#optional = true + +#[dependencies.array-ops] +#version = "0.1" +#optional = true [dependencies.bitmaps] version = "2.0.0" -[dependencies.refpool] -version = "0.3" -optional = true +#[dependencies.refpool] +#version = "0.4" +#optional = true [dependencies.typenum] version = "1.11.2" [dev-dependencies.criterion] -version = "0.3.0" - -[dev-dependencies.proptest] -version = "0.9.5" +version = "0.3" -[dev-dependencies.proptest-derive] -version = "0.1.2" +[features] +default = ["std"] +#ringbuffer = ["array-ops"] +std = [] diff -Nru cargo-0.44.1/vendor/sized-chunks/CHANGELOG.md 
cargo-0.47.0/vendor/sized-chunks/CHANGELOG.md --- cargo-0.44.1/vendor/sized-chunks/CHANGELOG.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/CHANGELOG.md 2020-10-01 21:38:28.000000000 +0000 @@ -5,6 +5,55 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [0.6.2] - 2020-05-15 + +### FIXED + +- This release exists for no other purpose than to bump the `refpool` optional dependency. + +## [0.6.1] - 2020-03-26 + +### ADDED + +- The crate now has a `std` feature flag, which is on by default, and will make the crate `no_std` + if disabled. + +### FIXED + +- Fixed a compilation error if you had the `arbitrary` feature flag enabled without the + `ringbuffer` flag. + +## [0.6.0] - 2020-03-24 + +### CHANGED + +- `RingBuffer` and its accompanying slice types `Slice` and `SliceMut` now implement `Array` and + `ArrayMut` from [`array-ops`](http://docs.rs/array-ops), giving them most of the methods that + would be available on primitive slice types and cutting down on code duplication in the + implementation, but at the price of having to pull `Array` et al into scope when you need them. + Because this means adding a dependency to `array-ops`, `RingBuffer` has now been moved behind + the `ringbuffer` feature flag. `Chunk` and `InlineArray` don't and won't implement `Array`, + because they are both able to implement `Deref<[A]>`, which provides the same functionality more + efficiently. + +### ADDED + +- The `insert_from` and `insert_ordered` methods recently added to `Chunk` have now also been + added to `RingBuffer`. +- `RingBuffer`'s `Slice` and `SliceMut` now also have the three `binary_search` methods regular + slices have. +- `SparseChunk`, `RingBuffer`, `Slice` and `SliceMut` now have unsafe `get_unchecked` and + `get_unchecked_mut` methods. +- `PartialEq` implementations allowing you to compare `RingBuffer`s, `Slice`s and `SliceMut`s + interchangeably have been added. + +### FIXED + +- Fixed an aliasing issue in `RingBuffer`'s mutable iterator, as uncovered by Miri. Behind the + scenes, the full non-fuzzing unit test suite is now able to run on Miri without crashing it + (after migrating the last Proptest tests away from the test suite into the fuzz targets), and + this has been included in its CI build. 
(#6) + ## [0.5.3] - 2020-03-11 ### FIXED diff -Nru cargo-0.44.1/vendor/sized-chunks/debian/patches/disable-features.diff cargo-0.47.0/vendor/sized-chunks/debian/patches/disable-features.diff --- cargo-0.44.1/vendor/sized-chunks/debian/patches/disable-features.diff 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/debian/patches/disable-features.diff 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,41 @@ +Index: sized-chunks/Cargo.toml +=================================================================== +--- sized-chunks.orig/Cargo.toml ++++ sized-chunks/Cargo.toml +@@ -29,20 +29,20 @@ all-features = true + [[bench]] + name = "sized_chunk" + harness = false +-[dependencies.arbitrary] +-version = "0.4" +-optional = true ++#[dependencies.arbitrary] ++#version = "0.4" ++#optional = true + +-[dependencies.array-ops] +-version = "0.1" +-optional = true ++#[dependencies.array-ops] ++#version = "0.1" ++#optional = true + + [dependencies.bitmaps] + version = "2.0.0" + +-[dependencies.refpool] +-version = "0.4" +-optional = true ++#[dependencies.refpool] ++#version = "0.4" ++#optional = true + + [dependencies.typenum] + version = "1.11.2" +@@ -51,5 +51,5 @@ version = "0.3" + + [features] + default = ["std"] +-ringbuffer = ["array-ops"] ++#ringbuffer = ["array-ops"] + std = [] diff -Nru cargo-0.44.1/vendor/sized-chunks/debian/patches/series cargo-0.47.0/vendor/sized-chunks/debian/patches/series --- cargo-0.44.1/vendor/sized-chunks/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/debian/patches/series 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +disable-features.diff diff -Nru cargo-0.44.1/vendor/sized-chunks/.pc/applied-patches cargo-0.47.0/vendor/sized-chunks/.pc/applied-patches --- cargo-0.44.1/vendor/sized-chunks/.pc/applied-patches 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/.pc/applied-patches 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +disable-features.diff diff -Nru cargo-0.44.1/vendor/sized-chunks/.pc/disable-features.diff/Cargo.toml cargo-0.47.0/vendor/sized-chunks/.pc/disable-features.diff/Cargo.toml --- cargo-0.44.1/vendor/sized-chunks/.pc/disable-features.diff/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/.pc/disable-features.diff/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,55 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "sized-chunks" +version = "0.6.2" +authors = ["Bodil Stokke "] +exclude = ["release.toml", "proptest-regressions/**"] +description = "Efficient sized chunk datatypes" +documentation = "http://docs.rs/sized-chunks" +readme = "./README.md" +keywords = ["sparse-array"] +categories = ["data-structures"] +license = "MPL-2.0+" +repository = "https://github.com/bodil/sized-chunks" +[package.metadata.docs.rs] +all-features = true + +[[bench]] +name = "sized_chunk" +harness = false +[dependencies.arbitrary] +version = "0.4" +optional = true + +[dependencies.array-ops] +version = "0.1" +optional = true + +[dependencies.bitmaps] +version = "2.0.0" + +[dependencies.refpool] +version = "0.4" +optional = true + +[dependencies.typenum] +version = "1.11.2" +[dev-dependencies.criterion] +version = "0.3" + +[features] +default = ["std"] +ringbuffer = ["array-ops"] +std = [] diff -Nru cargo-0.44.1/vendor/sized-chunks/.pc/.quilt_patches cargo-0.47.0/vendor/sized-chunks/.pc/.quilt_patches --- cargo-0.44.1/vendor/sized-chunks/.pc/.quilt_patches 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/.pc/.quilt_patches 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +debian/patches diff -Nru cargo-0.44.1/vendor/sized-chunks/.pc/.quilt_series cargo-0.47.0/vendor/sized-chunks/.pc/.quilt_series --- cargo-0.44.1/vendor/sized-chunks/.pc/.quilt_series 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/.pc/.quilt_series 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +series diff -Nru cargo-0.44.1/vendor/sized-chunks/.pc/.version cargo-0.47.0/vendor/sized-chunks/.pc/.version --- cargo-0.44.1/vendor/sized-chunks/.pc/.version 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/.pc/.version 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1 @@ +2 diff -Nru cargo-0.44.1/vendor/sized-chunks/src/arbitrary.rs cargo-0.47.0/vendor/sized-chunks/src/arbitrary.rs --- cargo-0.44.1/vendor/sized-chunks/src/arbitrary.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/arbitrary.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,11 +3,14 @@ // file, You can obtain one at http://mozilla.org/MPL/2.0/. 
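The Debian patch above comments out the optional `arbitrary`, `array-ops`, and `refpool` dependencies, and upstream 0.6 moves `RingBuffer` behind a `ringbuffer` feature flag, as the `arbitrary.rs` and `lib.rs` hunks nearby show. A minimal sketch of that kind of gating, using placeholder module and type names rather than the crate's real layout:

```rust
// The module, its re-export, and any impls that mention the type are all
// guarded by the same feature, so a build without it never names RingBuffer.
#[cfg(feature = "ringbuffer")]
mod ring_buffer {
    pub struct RingBuffer;
}

#[cfg(feature = "ringbuffer")]
pub use ring_buffer::RingBuffer;

fn main() {
    #[cfg(feature = "ringbuffer")]
    let _buffer = RingBuffer;
    #[cfg(not(feature = "ringbuffer"))]
    println!("built without the ringbuffer feature");
}
```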
use bitmaps::Bits; -use std::iter; +use core::iter; use ::arbitrary::{size_hint, Arbitrary, Result, Unstructured}; -use crate::{types::ChunkLength, Chunk, InlineArray, RingBuffer, SparseChunk}; +use crate::{types::ChunkLength, Chunk, InlineArray, SparseChunk}; + +#[cfg(feature = "ringbuffer")] +use crate::RingBuffer; fn empty() -> Box> { Box::new(iter::empty()) @@ -68,6 +71,7 @@ } } +#[cfg(feature = "ringbuffer")] impl Arbitrary for RingBuffer where A: Arbitrary, diff -Nru cargo-0.44.1/vendor/sized-chunks/src/inline_array/iter.rs cargo-0.47.0/vendor/sized-chunks/src/inline_array/iter.rs --- cargo-0.44.1/vendor/sized-chunks/src/inline_array/iter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/inline_array/iter.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use std::iter::FusedIterator; +use core::iter::FusedIterator; use crate::InlineArray; diff -Nru cargo-0.44.1/vendor/sized-chunks/src/inline_array/mod.rs cargo-0.47.0/vendor/sized-chunks/src/inline_array/mod.rs --- cargo-0.44.1/vendor/sized-chunks/src/inline_array/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/inline_array/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,16 +6,16 @@ //! //! See [`InlineArray`](struct.InlineArray.html) -use std::borrow::{Borrow, BorrowMut}; -use std::cmp::Ordering; -use std::fmt::{Debug, Error, Formatter}; -use std::hash::{Hash, Hasher}; -use std::iter::FromIterator; -use std::marker::PhantomData; -use std::mem::{self, MaybeUninit}; -use std::ops::{Deref, DerefMut}; -use std::ptr; -use std::slice::{from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut}; +use core::borrow::{Borrow, BorrowMut}; +use core::cmp::Ordering; +use core::fmt::{Debug, Error, Formatter}; +use core::hash::{Hash, Hasher}; +use core::iter::FromIterator; +use core::marker::PhantomData; +use core::mem::{self, MaybeUninit}; +use core::ops::{Deref, DerefMut}; +use core::ptr; +use core::slice::{from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut}; mod iter; pub use self::iter::{Drain, Iter}; @@ -165,12 +165,6 @@ self_ } - #[inline] - #[must_use] - fn get_unchecked(&self, index: usize) -> &A { - unsafe { &*self.data().add(index) } - } - /// Push an item to the back of the array. /// /// Panics if the capacity of the array is exceeded. @@ -267,16 +261,16 @@ } #[inline] - fn drop_contents(&mut self) { - unsafe { ptr::drop_in_place::<[A]>(&mut **self) } + unsafe fn drop_contents(&mut self) { + ptr::drop_in_place::<[A]>(&mut **self) } /// Discard the contents of the array. /// /// Time: O(n) pub fn clear(&mut self) { - self.drop_contents(); unsafe { + self.drop_contents(); *self.len_mut() = 0; } } @@ -289,7 +283,7 @@ impl Drop for InlineArray { fn drop(&mut self) { - self.drop_contents() + unsafe { self.drop_contents() } } } diff -Nru cargo-0.44.1/vendor/sized-chunks/src/lib.rs cargo-0.47.0/vendor/sized-chunks/src/lib.rs --- cargo-0.44.1/vendor/sized-chunks/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -24,7 +24,7 @@ //! ## Data Structures //! //! | Type | Description | Push | Pop | Deref to `&[A]` | -//! | --- | --- | --- | --- | --- | +//! | ---- | ----------- | ---- | --- | --------------- | //! | [`Chunk`][Chunk] | Contiguous array | O(1)/O(n) | O(1) | Yes | //! | [`RingBuffer`][RingBuffer] | Non-contiguous array | O(1) | O(1) | No | //! | [`SparseChunk`][SparseChunk] | Sparse array | N/A | N/A | No | @@ -45,7 +45,7 @@ //! 
side will cause the latter to run in linear time if there's no room (which //! there would only be if you've popped from that side). //! -//!To choose between them, you can use the following rules: +//! To choose between them, you can use the following rules: //! - I only ever want to push to the back: you don't need this crate, try //! [`ArrayVec`][ArrayVec]. //! - I need to push to either side but probably not both on the same array: use @@ -62,20 +62,50 @@ //! overhead. Its API is also more consistent with a map than an array - there's //! no push, pop, append, etc, just insert, remove and lookup. //! +//! # [`InlineArray`][InlineArray] +//! +//! Finally, there's [`InlineArray`][InlineArray], which is a simple vector that's +//! sized to fit inside any `Sized` type that's big enough to hold a size counter +//! and at least one instance of the array element type. This can be a useful +//! optimisation when implementing a list like data structure with a nontrivial +//! set of pointers in its full form, where you could plausibly fit several +//! elements inside the space allocated for the pointers. `im::Vector` is a +//! good example of that, and the use case for which [`InlineArray`][InlineArray] +//! was implemented. +//! +//! # Feature Flags +//! +//! The following feature flags are available: +//! +//! | Feature | Description | +//! | ------- | ----------- | +//! | `arbitrary` | Provides [`Arbitrary`][Arbitrary] implementations from the [`arbitrary`][arbitrary_crate] crate. Requires the `std` flag. | +//! | `refpool` | Provides [`PoolDefault`][PoolDefault] and [`PoolClone`][PoolClone] implemetations from the [`refpool`][refpool] crate. | +//! | `ringbuffer` | Enables the [`RingBuffer`][RingBuffer] data structure. | +//! | `std` | Without this flag (enabled by default), the crate will be `no_std`, and absent traits relating to `std::collections` and `std::io`. | +//! //! [immutable.rs]: https://immutable.rs/ //! [typenum]: https://docs.rs/typenum/ //! [Chunk]: struct.Chunk.html //! [RingBuffer]: struct.RingBuffer.html //! [SparseChunk]: struct.SparseChunk.html +//! [InlineArray]: struct.InlineArray.html //! [ArrayVec]: https://docs.rs/arrayvec/ +//! [Arbitrary]: https://docs.rs/arbitrary/latest/arbitrary/trait.Arbitrary.html +//! [arbitrary_crate]: https://docs.rs/arbitrary +//! [refpool]: https://docs.rs/refpool +//! [PoolDefault]: https://docs.rs/refpool/latest/refpool/trait.PoolDefault.html +//! 
[PoolClone]: https://docs.rs/refpool/latest/refpool/trait.PoolClone.html #![forbid(rust_2018_idioms)] #![deny(nonstandard_style)] #![warn(unreachable_pub, missing_docs)] #![cfg_attr(test, deny(warnings))] +#![cfg_attr(not(any(feature = "std", test)), no_std)] +// Jeremy Francis Corbyn, clippy devs need to calm down 🤦‍♀️ +#![allow(clippy::suspicious_op_assign_impl, clippy::suspicious_arithmetic_impl)] pub mod inline_array; -pub mod ring_buffer; pub mod sized_chunk; pub mod sparse_chunk; pub mod types; @@ -87,6 +117,10 @@ mod arbitrary; pub use crate::inline_array::InlineArray; -pub use crate::ring_buffer::RingBuffer; pub use crate::sized_chunk::Chunk; pub use crate::sparse_chunk::SparseChunk; + +#[cfg(feature = "ringbuffer")] +pub mod ring_buffer; +#[cfg(feature = "ringbuffer")] +pub use crate::ring_buffer::RingBuffer; diff -Nru cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/index.rs cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/index.rs --- cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/index.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/index.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2,15 +2,15 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. -use std::iter::FusedIterator; -use std::marker::PhantomData; -use std::ops::{Add, AddAssign, Sub, SubAssign}; +use core::iter::FusedIterator; +use core::marker::PhantomData; +use core::ops::{Add, AddAssign, Sub, SubAssign}; -use crate::types::ChunkLength; +use typenum::Unsigned; -pub(crate) struct RawIndex>(usize, PhantomData<(A, N)>); +pub(crate) struct RawIndex(usize, PhantomData); -impl> Clone for RawIndex { +impl Clone for RawIndex { #[inline] #[must_use] fn clone(&self) -> Self { @@ -18,9 +18,9 @@ } } -impl Copy for RawIndex where N: ChunkLength
{} +impl Copy for RawIndex where N: Unsigned {} -impl> RawIndex { +impl RawIndex { #[inline] #[must_use] pub(crate) fn to_usize(self) -> usize { @@ -53,7 +53,7 @@ } } -impl> From for RawIndex { +impl From for RawIndex { #[inline] #[must_use] fn from(index: usize) -> Self { @@ -62,7 +62,7 @@ } } -impl> PartialEq for RawIndex { +impl PartialEq for RawIndex { #[inline] #[must_use] fn eq(&self, other: &Self) -> bool { @@ -70,10 +70,10 @@ } } -impl> Eq for RawIndex {} +impl Eq for RawIndex {} -impl> Add for RawIndex { - type Output = RawIndex; +impl Add for RawIndex { + type Output = RawIndex; #[inline] #[must_use] fn add(self, other: Self) -> Self::Output { @@ -81,8 +81,8 @@ } } -impl> Add for RawIndex { - type Output = RawIndex; +impl Add for RawIndex { + type Output = RawIndex; #[inline] #[must_use] fn add(self, other: usize) -> Self::Output { @@ -94,7 +94,7 @@ } } -impl> AddAssign for RawIndex { +impl AddAssign for RawIndex { #[inline] fn add_assign(&mut self, other: usize) { self.0 += other; @@ -104,8 +104,8 @@ } } -impl> Sub for RawIndex { - type Output = RawIndex; +impl Sub for RawIndex { + type Output = RawIndex; #[inline] #[must_use] fn sub(self, other: Self) -> Self::Output { @@ -113,8 +113,8 @@ } } -impl> Sub for RawIndex { - type Output = RawIndex; +impl Sub for RawIndex { + type Output = RawIndex; #[inline] #[must_use] fn sub(self, other: usize) -> Self::Output { @@ -126,7 +126,7 @@ } } -impl> SubAssign for RawIndex { +impl SubAssign for RawIndex { #[inline] fn sub_assign(&mut self, other: usize) { while other > self.0 { @@ -136,14 +136,14 @@ } } -pub(crate) struct IndexIter> { +pub(crate) struct IndexIter { pub(crate) remaining: usize, - pub(crate) left_index: RawIndex, - pub(crate) right_index: RawIndex, + pub(crate) left_index: RawIndex, + pub(crate) right_index: RawIndex, } -impl> Iterator for IndexIter { - type Item = RawIndex; +impl Iterator for IndexIter { + type Item = RawIndex; #[inline] fn next(&mut self) -> Option { if self.remaining > 0 { @@ -161,7 +161,7 @@ } } -impl> DoubleEndedIterator for IndexIter { +impl DoubleEndedIterator for IndexIter { #[inline] fn next_back(&mut self) -> Option { if self.remaining > 0 { @@ -173,6 +173,6 @@ } } -impl> ExactSizeIterator for IndexIter {} +impl ExactSizeIterator for IndexIter {} -impl> FusedIterator for IndexIter {} +impl FusedIterator for IndexIter {} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/iter.rs cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/iter.rs --- cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/iter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/iter.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2,11 +2,13 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. -use std::iter::FusedIterator; +use core::iter::FusedIterator; +use core::marker::PhantomData; use crate::types::ChunkLength; use super::{index::RawIndex, RingBuffer}; +use array_ops::HasLength; /// A reference iterator over a `RingBuffer`. 
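The `IterMut` rewrite in the `iter.rs` hunk that follows stops holding `&'a mut RingBuffer` and instead stores a raw element pointer plus `PhantomData`, which lets it hand out `&'a mut A` items without aliasing a live mutable borrow of the whole buffer (the Miri-reported aliasing issue noted in the changelog above). A self-contained sketch of that pattern over a plain slice (all names here are illustrative, not the crate's):

```rust
use core::marker::PhantomData;

struct SketchIterMut<'a, A> {
    data: *mut A,
    index: usize,
    len: usize,
    phantom: PhantomData<&'a mut A>,
}

impl<'a, A> SketchIterMut<'a, A> {
    fn new(slice: &'a mut [A]) -> Self {
        Self {
            data: slice.as_mut_ptr(),
            index: 0,
            len: slice.len(),
            phantom: PhantomData,
        }
    }
}

impl<'a, A> Iterator for SketchIterMut<'a, A> {
    type Item = &'a mut A;

    fn next(&mut self) -> Option<&'a mut A> {
        if self.index == self.len {
            None
        } else {
            // Safety: the index is in bounds and strictly increases, so each
            // element is handed out at most once.
            let item = unsafe { &mut *self.data.add(self.index) };
            self.index += 1;
            Some(item)
        }
    }
}

fn main() {
    let mut values = [1, 2, 3];
    for value in SketchIterMut::new(&mut values) {
        *value *= 10;
    }
    assert_eq!(values, [10, 20, 30]);
}
```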
pub struct Iter<'a, A, N> @@ -14,8 +16,8 @@ N: ChunkLength, { pub(crate) buffer: &'a RingBuffer, - pub(crate) left_index: RawIndex, - pub(crate) right_index: RawIndex, + pub(crate) left_index: RawIndex, + pub(crate) right_index: RawIndex, pub(crate) remaining: usize, } @@ -64,15 +66,45 @@ where N: ChunkLength, { - pub(crate) buffer: &'a mut RingBuffer, - pub(crate) left_index: RawIndex, - pub(crate) right_index: RawIndex, - pub(crate) remaining: usize, + data: *mut A, + left_index: RawIndex, + right_index: RawIndex, + remaining: usize, + phantom: PhantomData<&'a ()>, +} + +impl<'a, A, N> IterMut<'a, A, N> +where + N: ChunkLength, + A: 'a, +{ + pub(crate) fn new(buffer: &mut RingBuffer) -> Self { + Self::new_slice(buffer, buffer.origin, buffer.len()) + } + + pub(crate) fn new_slice( + buffer: &mut RingBuffer, + origin: RawIndex, + len: usize, + ) -> Self { + Self { + left_index: origin, + right_index: origin + len, + remaining: len, + phantom: PhantomData, + data: buffer.data.as_mut_ptr().cast(), + } + } + + unsafe fn mut_ptr(&mut self, index: RawIndex) -> *mut A { + self.data.add(index.to_usize()) + } } impl<'a, A, N> Iterator for IterMut<'a, A, N> where N: ChunkLength, + A: 'a, { type Item = &'a mut A; @@ -81,7 +113,8 @@ None } else { self.remaining -= 1; - Some(unsafe { &mut *self.buffer.mut_ptr(self.left_index.inc()) }) + let index = self.left_index.inc(); + Some(unsafe { &mut *self.mut_ptr(index) }) } } @@ -95,20 +128,32 @@ impl<'a, A, N> DoubleEndedIterator for IterMut<'a, A, N> where N: ChunkLength, + A: 'a, { fn next_back(&mut self) -> Option { if self.remaining == 0 { None } else { self.remaining -= 1; - Some(unsafe { &mut *self.buffer.mut_ptr(self.right_index.dec()) }) + let index = self.right_index.dec(); + Some(unsafe { &mut *self.mut_ptr(index) }) } } } -impl<'a, A, N> ExactSizeIterator for IterMut<'a, A, N> where N: ChunkLength {} +impl<'a, A, N> ExactSizeIterator for IterMut<'a, A, N> +where + N: ChunkLength, + A: 'a, +{ +} -impl<'a, A, N> FusedIterator for IterMut<'a, A, N> where N: ChunkLength {} +impl<'a, A, N> FusedIterator for IterMut<'a, A, N> +where + N: ChunkLength, + A: 'a, +{ +} /// A draining iterator over a `RingBuffer`. pub struct Drain<'a, A, N: ChunkLength> { diff -Nru cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/mod.rs cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/mod.rs --- cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,17 +6,19 @@ //! //! 
See [`RingBuffer`](struct.RingBuffer.html) -use std::borrow::Borrow; -use std::cmp::Ordering; -use std::fmt::{Debug, Error, Formatter}; -use std::hash::{Hash, Hasher}; -use std::iter::FromIterator; -use std::mem::MaybeUninit; -use std::ops::{Bound, Range, RangeBounds}; -use std::ops::{Index, IndexMut}; +use core::borrow::Borrow; +use core::cmp::Ordering; +use core::fmt::{Debug, Error, Formatter}; +use core::hash::{Hash, Hasher}; +use core::iter::FromIterator; +use core::mem::MaybeUninit; +use core::ops::{Bound, Range, RangeBounds}; +use core::ops::{Index, IndexMut}; use typenum::U64; +pub use array_ops::{Array, ArrayMut, HasLength}; + use crate::types::ChunkLength; mod index; @@ -49,9 +51,13 @@ /// The `RingBuffer` offers its own [`Slice`][Slice] and [`SliceMut`][SliceMut] /// types to compensate for the loss of being able to take a slice, but they're /// somewhat less efficient, so the general rule should be that you shouldn't -/// choose a `RingBuffer` if you really need to take slices - but if you don't, +/// choose a `RingBuffer` if you rely heavily on slices - but if you don't, /// it's probably a marginally better choice overall than [`Chunk`][Chunk]. /// +/// # Feature Flag +/// +/// To use this data structure, you need to enable the `ringbuffer` feature. +/// /// [Chunk]: ../sized_chunk/struct.Chunk.html /// [Slice]: struct.Slice.html /// [SliceMut]: struct.SliceMut.html @@ -59,7 +65,7 @@ where N: ChunkLength, { - origin: RawIndex, + origin: RawIndex, length: usize, data: MaybeUninit, } @@ -67,7 +73,7 @@ impl> Drop for RingBuffer { #[inline] fn drop(&mut self) { - if std::mem::needs_drop::() { + if core::mem::needs_drop::() { for i in self.range() { unsafe { self.force_drop(i) } } @@ -75,6 +81,48 @@ } } +impl HasLength for RingBuffer +where + N: ChunkLength, +{ + /// Get the length of the ring buffer. + #[inline] + #[must_use] + fn len(&self) -> usize { + self.length + } +} + +impl Array for RingBuffer +where + N: ChunkLength, +{ + /// Get a reference to the value at a given index. + #[must_use] + fn get(&self, index: usize) -> Option<&A> { + if index >= self.len() { + None + } else { + Some(unsafe { self.get_unchecked(index) }) + } + } +} + +impl ArrayMut for RingBuffer +where + N: ChunkLength, +{ + /// Get a mutable reference to the value at a given index. + #[must_use] + fn get_mut(&mut self, index: usize) -> Option<&mut A> { + if index >= self.len() { + None + } else { + Some(unsafe { self.get_unchecked_mut(index) }) + } + } +} + impl RingBuffer where N: ChunkLength, @@ -84,60 +132,60 @@ /// Get the raw index for a logical index. #[inline] - fn raw(&self, index: usize) -> RawIndex { + fn raw(&self, index: usize) -> RawIndex { self.origin + index } #[inline] - unsafe fn ptr(&self, index: RawIndex) -> *const A { + unsafe fn ptr(&self, index: RawIndex) -> *const A { debug_assert!(index.to_usize() < Self::CAPACITY); (&self.data as *const _ as *const A).add(index.to_usize()) } #[inline] - unsafe fn mut_ptr(&mut self, index: RawIndex) -> *mut A { + unsafe fn mut_ptr(&mut self, index: RawIndex) -> *mut A { debug_assert!(index.to_usize() < Self::CAPACITY); (&mut self.data as *mut _ as *mut A).add(index.to_usize()) } /// Drop the value at a raw index. 
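In the hunk above, `RingBuffer`'s inherent `len`, `get`, and `get_mut` move into the `HasLength`, `Array`, and `ArrayMut` traits from `array-ops`, and the slice-like convenience accessors then come along as provided trait methods, which is the code-duplication saving the changelog mentions. A simplified stand-in (these trait definitions are illustrative, not the real `array-ops` ones) showing why only the required methods need hand-written impls:

```rust
trait HasLengthSketch {
    fn len(&self) -> usize;
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

trait ArraySketch<A>: HasLengthSketch {
    fn get(&self, index: usize) -> Option<&A>;
    fn first(&self) -> Option<&A> {
        self.get(0)
    }
    fn last(&self) -> Option<&A> {
        if self.is_empty() {
            None
        } else {
            self.get(self.len() - 1)
        }
    }
}

struct Buffer(Vec<u32>);

impl HasLengthSketch for Buffer {
    fn len(&self) -> usize {
        self.0.len()
    }
}

impl ArraySketch<u32> for Buffer {
    fn get(&self, index: usize) -> Option<&u32> {
        self.0.get(index)
    }
}

fn main() {
    let buffer = Buffer(vec![1, 2, 3]);
    assert_eq!(buffer.first(), Some(&1));
    assert_eq!(buffer.last(), Some(&3));
    assert!(!buffer.is_empty());
}
```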
#[inline] - unsafe fn force_drop(&mut self, index: RawIndex) { - std::ptr::drop_in_place(self.mut_ptr(index)) + unsafe fn force_drop(&mut self, index: RawIndex) { + core::ptr::drop_in_place(self.mut_ptr(index)) } /// Copy the value at a raw index, discarding ownership of the copied value #[inline] - unsafe fn force_read(&self, index: RawIndex) -> A { - std::ptr::read(self.ptr(index)) + unsafe fn force_read(&self, index: RawIndex) -> A { + core::ptr::read(self.ptr(index)) } /// Write a value at a raw index without trying to drop what's already there #[inline] - unsafe fn force_write(&mut self, index: RawIndex, value: A) { - std::ptr::write(self.mut_ptr(index), value) + unsafe fn force_write(&mut self, index: RawIndex, value: A) { + core::ptr::write(self.mut_ptr(index), value) } /// Copy a range of raw indices from another buffer. unsafe fn copy_from( &mut self, source: &mut Self, - from: RawIndex, - to: RawIndex, + from: RawIndex, + to: RawIndex, count: usize, ) { #[inline] unsafe fn force_copy_to>( source: &mut RingBuffer, - from: RawIndex, + from: RawIndex, target: &mut RingBuffer, - to: RawIndex, + to: RawIndex, count: usize, ) { if count > 0 { debug_assert!(from.to_usize() + count <= RingBuffer::::CAPACITY); debug_assert!(to.to_usize() + count <= RingBuffer::::CAPACITY); - std::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count) + core::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count) } } @@ -157,31 +205,32 @@ } /// Copy values from a slice. - unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex) { + #[allow(dead_code)] + unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex) { let count = source.len(); debug_assert!(to.to_usize() + count <= Self::CAPACITY); if to.to_usize() + count > Self::CAPACITY { let first_length = Self::CAPACITY - to.to_usize(); let first_slice = &source[..first_length]; let last_slice = &source[first_length..]; - std::ptr::copy_nonoverlapping( + core::ptr::copy_nonoverlapping( first_slice.as_ptr(), self.mut_ptr(to), first_slice.len(), ); - std::ptr::copy_nonoverlapping( + core::ptr::copy_nonoverlapping( last_slice.as_ptr(), self.mut_ptr(0.into()), last_slice.len(), ); } else { - std::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count) + core::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count) } } /// Get an iterator over the raw indices of the buffer from left to right. #[inline] - fn range(&self) -> IndexIter { + fn range(&self) -> IndexIter { IndexIter { remaining: self.len(), left_index: self.origin, @@ -281,20 +330,6 @@ buffer } - /// Get the length of the ring buffer. - #[inline] - #[must_use] - pub fn len(&self) -> usize { - self.length - } - - /// Test if the ring buffer is empty. - #[inline] - #[must_use] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - /// Test if the ring buffer is full. #[inline] #[must_use] @@ -320,12 +355,7 @@ #[inline] #[must_use] pub fn iter_mut(&mut self) -> IterMut<'_, A, N> { - IterMut { - left_index: self.origin, - right_index: self.origin + self.len(), - remaining: self.len(), - buffer: self, - } + IterMut::new(self) } #[must_use] @@ -366,60 +396,24 @@ } } - /// Get a reference to the value at a given index. - #[must_use] - pub fn get(&self, index: usize) -> Option<&A> { - if index >= self.len() { - None - } else { - Some(unsafe { &*self.ptr(self.raw(index)) }) - } - } - - /// Get a mutable reference to the value at a given index. 
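The `copy_from_slice` helper above splits the source whenever the destination raw index would run past the end of the backing array, writing the tail at the start instead. A standalone sketch of that wrap-around copy over a plain array (the helper name and fixed capacity are illustrative):

```rust
// When the destination range would run past the end of the ring's backing
// storage, split the source: the head goes at `start`, the tail wraps to 0.
fn copy_into_ring(ring: &mut [u32], start: usize, source: &[u32]) {
    let capacity = ring.len();
    assert!(source.len() <= capacity, "source must fit in the ring");
    let first_len = (capacity - start).min(source.len());
    ring[start..start + first_len].copy_from_slice(&source[..first_len]);
    ring[..source.len() - first_len].copy_from_slice(&source[first_len..]);
}

fn main() {
    let mut ring = [0u32; 8];
    // Write five values starting at raw index 6: two fit at the end,
    // the remaining three wrap around to the front.
    copy_into_ring(&mut ring, 6, &[1, 2, 3, 4, 5]);
    assert_eq!(ring, [3, 4, 5, 0, 0, 0, 1, 2]);
}
```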
- #[must_use] - pub fn get_mut(&mut self, index: usize) -> Option<&mut A> { - if index >= self.len() { - None - } else { - Some(unsafe { &mut *self.mut_ptr(self.raw(index)) }) - } - } - - /// Get a reference to the first value in the buffer. - #[inline] - #[must_use] - pub fn first(&self) -> Option<&A> { - self.get(0) - } - - /// Get a mutable reference to the first value in the buffer. - #[inline] - #[must_use] - pub fn first_mut(&mut self) -> Option<&mut A> { - self.get_mut(0) - } - - /// Get a reference to the last value in the buffer. - #[inline] + /// Get an unchecked reference to the value at the given index. + /// + /// # Safety + /// + /// You must ensure the index is not out of bounds. #[must_use] - pub fn last(&self) -> Option<&A> { - if self.is_empty() { - None - } else { - self.get(self.len() - 1) - } + pub unsafe fn get_unchecked(&self, index: usize) -> &A { + &*self.ptr(self.raw(index)) } - /// Get a mutable reference to the last value in the buffer. - #[inline] + /// Get an unchecked mutable reference to the value at the given index. + /// + /// # Safety + /// + /// You must ensure the index is not out of bounds. #[must_use] - pub fn last_mut(&mut self) -> Option<&mut A> { - if self.is_empty() { - None - } else { - self.get_mut(self.len() - 1) - } + pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A { + &mut *self.mut_ptr(self.raw(index)) } /// Push a value to the back of the buffer. @@ -596,15 +590,6 @@ self.length += count; } - /// Update the value at index `index`, returning the old value. - /// - /// Panics if `index` is out of bounds. - /// - /// Time: O(1) - pub fn set(&mut self, index: usize, value: A) -> A { - std::mem::replace(&mut self[index], value) - } - /// Insert a new value at index `index`, shifting all the following values /// to the right. /// @@ -647,6 +632,94 @@ unsafe { self.force_write(self.raw(index), value) }; } + /// Insert a new value into the buffer in sorted order. + /// + /// This assumes every element of the buffer is already in sorted order. + /// If not, the value will still be inserted but the ordering is not + /// guaranteed. + /// + /// Time: O(log n) to find the insert position, then O(n) for the number + /// of elements shifted. + /// + /// # Examples + /// + /// ```rust + /// # use std::iter::FromIterator; + /// # use sized_chunks::Chunk; + /// # use typenum::U64; + /// let mut chunk = Chunk::::from_iter(0..5); + /// chunk.insert_ordered(3); + /// assert_eq!(&[0, 1, 2, 3, 3, 4], chunk.as_slice()); + /// ``` + pub fn insert_ordered(&mut self, value: A) + where + A: Ord, + { + if self.is_full() { + panic!("Chunk::insert: chunk is full"); + } + match self.slice(..).binary_search(&value) { + Ok(index) => self.insert(index, value), + Err(index) => self.insert(index, value), + } + } + + /// Insert multiple values at index `index`, shifting all the following values + /// to the right. + /// + /// Panics if the index is out of bounds or the chunk doesn't have room for + /// all the values. + /// + /// Time: O(m+n) where m is the number of elements inserted and n is the number + /// of elements following the insertion index. Calling `insert` + /// repeatedly would be O(m*n). 
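`insert_ordered` above and the `insert_from` implementation that follows give `RingBuffer` the same sorted and bulk insertion helpers `Chunk` already had. A usage sketch, assuming the sized-chunks 0.6 API added in these hunks and a build with the `ringbuffer` feature enabled:

```rust
use std::iter::FromIterator;

use sized_chunks::RingBuffer;
use typenum::U64;

fn main() {
    // Insert two values at index 2 in one pass instead of repeated insert().
    let mut buffer: RingBuffer<i32, U64> = RingBuffer::from_iter(0..5);
    buffer.insert_from(2, vec![10, 11]);
    assert_eq!(
        Vec::from_iter(buffer.iter().copied()),
        vec![0, 1, 10, 11, 2, 3, 4]
    );

    // Insert into an already sorted buffer, keeping it sorted.
    let mut sorted: RingBuffer<i32, U64> = RingBuffer::from_iter(0..5);
    sorted.insert_ordered(3);
    assert_eq!(
        Vec::from_iter(sorted.iter().copied()),
        vec![0, 1, 2, 3, 3, 4]
    );
}
```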
+ pub fn insert_from(&mut self, index: usize, iter: Iterable) + where + Iterable: IntoIterator, + I: ExactSizeIterator, + { + let iter = iter.into_iter(); + let insert_size = iter.len(); + if self.len() + insert_size > Self::CAPACITY { + panic!( + "Chunk::insert_from: chunk cannot fit {} elements", + insert_size + ); + } + if index > self.len() { + panic!("Chunk::insert_from: index out of bounds"); + } + if index == self.len() { + self.extend(iter); + return; + } + let right_count = self.len() - index; + // Check which side has fewer elements to shift. + if right_count < index { + // Shift to the right. + let mut i = self.raw(self.len() - 1); + let target = self.raw(index); + while i != target { + unsafe { self.force_write(i + insert_size, self.force_read(i)) }; + i -= 1; + } + unsafe { self.force_write(target + insert_size, self.force_read(target)) }; + self.length += insert_size; + } else { + // Shift to the left. + self.origin -= insert_size; + self.length += insert_size; + for i in self.range().take(index) { + unsafe { self.force_write(i, self.force_read(i + insert_size)) }; + } + } + let mut index = self.raw(index); + for value in iter { + unsafe { self.force_write(index, value) }; + index += 1; + } + } + /// Remove the value at index `index`, shifting all the following values to /// the left. /// @@ -765,20 +838,40 @@ } } -impl PartialEq for RingBuffer +impl PartialEq for RingBuffer where - Slice: Borrow<[A]>, + PrimSlice: Borrow<[A]>, A: PartialEq, N: ChunkLength, { #[inline] #[must_use] - fn eq(&self, other: &Slice) -> bool { + fn eq(&self, other: &PrimSlice) -> bool { let other = other.borrow(); self.len() == other.len() && self.iter().eq(other.iter()) } } +impl PartialEq> for RingBuffer +where + A: PartialEq, + N: ChunkLength, +{ + fn eq(&self, other: &Slice<'_, A, N>) -> bool { + self.len() == other.len() && self.iter().eq(other.iter()) + } +} + +impl PartialEq> for RingBuffer +where + A: PartialEq, + N: ChunkLength, +{ + fn eq(&self, other: &SliceMut<'_, A, N>) -> bool { + self.len() == other.len() && self.iter().eq(other.iter()) + } +} + impl> Eq for RingBuffer {} impl> PartialOrd for RingBuffer { @@ -831,6 +924,7 @@ } } +#[cfg(feature = "std")] impl> std::io::Write for RingBuffer { fn write(&mut self, mut buf: &[u8]) -> std::io::Result { let max_new = Self::CAPACITY - self.len(); @@ -848,6 +942,7 @@ } } +#[cfg(feature = "std")] impl> std::io::Read for RingBuffer { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { let read_size = buf.len().min(self.len()); @@ -911,6 +1006,11 @@ use super::*; #[test] + fn validity_invariant() { + assert!(Some(RingBuffer::>::new()).is_some()); + } + + #[test] fn is_full() { let mut chunk = RingBuffer::<_, U64>::new(); for i in 0..64 { @@ -957,6 +1057,7 @@ assert_eq!(half, should); } + #[cfg(feature = "std")] #[test] fn io_write() { use std::io::Write; @@ -966,6 +1067,7 @@ assert_eq!(buffer, (0..64).collect::>()); } + #[cfg(feature = "std")] #[test] fn io_read() { use std::io::Read; diff -Nru cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/refpool.rs cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/refpool.rs --- cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/refpool.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/refpool.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use std::mem::MaybeUninit; +use core::mem::MaybeUninit; use ::refpool::{PoolClone, PoolDefault}; @@ -12,7 +12,7 @@ { unsafe fn default_uninit(target: &mut MaybeUninit) { let ptr = target.as_mut_ptr(); - let origin_ptr: 
*mut RawIndex = &mut (*ptr).origin; + let origin_ptr: *mut RawIndex = &mut (*ptr).origin; let length_ptr: *mut usize = &mut (*ptr).length; origin_ptr.write(0.into()); length_ptr.write(0); @@ -26,7 +26,7 @@ { unsafe fn clone_uninit(&self, target: &mut MaybeUninit) { let ptr = target.as_mut_ptr(); - let origin_ptr: *mut RawIndex = &mut (*ptr).origin; + let origin_ptr: *mut RawIndex = &mut (*ptr).origin; let length_ptr: *mut usize = &mut (*ptr).length; let data_ptr: *mut _ = &mut (*ptr).data; let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast(); @@ -56,7 +56,7 @@ chunk.push_back(2); chunk.push_back(3); } - let ref2 = ref1.cloned(&pool); + let ref2 = PoolRef::cloned(&pool, &ref1); let ref3 = PoolRef::clone_from(&pool, &RingBuffer::from_iter(1..=3)); assert_eq!(RingBuffer::::from_iter(1..=3), *ref1); assert_eq!(RingBuffer::::from_iter(1..=3), *ref2); diff -Nru cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/slice.rs cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/slice.rs --- cargo-0.44.1/vendor/sized-chunks/src/ring_buffer/slice.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/ring_buffer/slice.rs 2020-10-01 21:38:28.000000000 +0000 @@ -2,19 +2,20 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. -use std::borrow::Borrow; -use std::cmp::Ordering; -use std::fmt::Debug; -use std::fmt::Error; -use std::fmt::Formatter; -use std::hash::Hash; -use std::hash::Hasher; -use std::ops::IndexMut; -use std::ops::{Bound, Index, Range, RangeBounds}; +use core::borrow::Borrow; +use core::cmp::Ordering; +use core::fmt::Debug; +use core::fmt::Error; +use core::fmt::Formatter; +use core::hash::Hash; +use core::hash::Hasher; +use core::ops::IndexMut; +use core::ops::{Bound, Index, Range, RangeBounds}; +use super::{Iter, IterMut, RingBuffer}; use crate::types::ChunkLength; -use super::{Iter, IterMut, RingBuffer}; +use array_ops::{Array, ArrayMut, HasLength}; /// An indexable representation of a subset of a `RingBuffer`. pub struct Slice<'a, A, N: ChunkLength> { @@ -22,48 +23,37 @@ pub(crate) range: Range, } -impl<'a, A: 'a, N: ChunkLength + 'a> Slice<'a, A, N> { +impl<'a, A: 'a, N: ChunkLength + 'a> HasLength for Slice<'a, A, N> { /// Get the length of the slice. #[inline] #[must_use] - pub fn len(&self) -> usize { + fn len(&self) -> usize { self.range.end - self.range.start } +} - /// Test if the slice is empty. - #[inline] - #[must_use] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - +impl<'a, A: 'a, N: ChunkLength + 'a> Array for Slice<'a, A, N> { /// Get a reference to the value at a given index. #[inline] #[must_use] - pub fn get(&self, index: usize) -> Option<&'a A> { + fn get(&self, index: usize) -> Option<&A> { if index >= self.len() { None } else { - self.buffer.get(self.range.start + index) + Some(unsafe { self.get_unchecked(index) }) } } +} - /// Get a reference to the first value in the slice. - #[inline] - #[must_use] - pub fn first(&self) -> Option<&A> { - self.get(0) - } - - /// Get a reference to the last value in the slice. - #[inline] +impl<'a, A: 'a, N: ChunkLength + 'a> Slice<'a, A, N> { + /// Get an unchecked reference to the value at the given index. + /// + /// # Safety + /// + /// You must ensure the index is not out of bounds. 
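The `Slice` hunk above re-expresses the safe `get` as a bounds check followed by a call to the new unsafe `get_unchecked`, keeping the unchecked access in one documented place. A minimal sketch of that layering over a plain slice (the `Window` type is illustrative):

```rust
struct Window<'a> {
    data: &'a [u32],
}

impl<'a> Window<'a> {
    /// # Safety
    ///
    /// The caller must ensure `index < self.data.len()`.
    unsafe fn get_unchecked(&self, index: usize) -> &u32 {
        self.data.get_unchecked(index)
    }

    /// Safe wrapper: validate the index, then delegate to the unchecked path.
    fn get(&self, index: usize) -> Option<&u32> {
        if index >= self.data.len() {
            None
        } else {
            // Safety: the bounds check above guarantees the index is valid.
            Some(unsafe { self.get_unchecked(index) })
        }
    }
}

fn main() {
    let values = [1, 2, 3];
    let window = Window { data: &values };
    assert_eq!(window.get(1), Some(&2));
    assert_eq!(window.get(9), None);
}
```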
#[must_use] - pub fn last(&self) -> Option<&A> { - if self.is_empty() { - None - } else { - self.get(self.len() - 1) - } + pub unsafe fn get_unchecked(&self, index: usize) -> &A { + self.buffer.get_unchecked(self.range.start + index) } /// Get an iterator over references to the items in the slice in order. @@ -187,6 +177,26 @@ } } +impl<'a, A: PartialEq + 'a, N: ChunkLength + 'a> PartialEq> + for Slice<'a, A, N> +{ + #[inline] + #[must_use] + fn eq(&self, other: &SliceMut<'a, A, N>) -> bool { + self.len() == other.len() && self.iter().eq(other.iter()) + } +} + +impl<'a, A: PartialEq + 'a, N: ChunkLength + 'a> PartialEq> + for Slice<'a, A, N> +{ + #[inline] + #[must_use] + fn eq(&self, other: &RingBuffer) -> bool { + self.len() == other.len() && self.iter().eq(other.iter()) + } +} + impl<'a, A: PartialEq + 'a, N: ChunkLength + 'a, S> PartialEq for Slice<'a, A, N> where S: Borrow<[A]>, @@ -252,91 +262,70 @@ pub(crate) range: Range, } -impl<'a, A: 'a, N: ChunkLength + 'a> SliceMut<'a, A, N> { - /// Downgrade this slice into a non-mutable slice. - #[inline] - #[must_use] - pub fn unmut(self) -> Slice<'a, A, N> { - Slice { - buffer: self.buffer, - range: self.range, - } - } - +impl<'a, A: 'a, N: ChunkLength + 'a> HasLength for SliceMut<'a, A, N> { /// Get the length of the slice. #[inline] #[must_use] - pub fn len(&self) -> usize { + fn len(&self) -> usize { self.range.end - self.range.start } +} - /// Test if the slice is empty. - #[inline] - #[must_use] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - +impl<'a, A: 'a, N: ChunkLength + 'a> Array for SliceMut<'a, A, N> { /// Get a reference to the value at a given index. #[inline] #[must_use] - pub fn get(&self, index: usize) -> Option<&'a A> { + fn get(&self, index: usize) -> Option<&A> { if index >= self.len() { None } else { - self.buffer - .get(self.range.start + index) - .map(|r| unsafe { &*(r as *const _) }) + Some(unsafe { self.get_unchecked(index) }) } } +} +impl<'a, A: 'a, N: ChunkLength + 'a> ArrayMut for SliceMut<'a, A, N> { /// Get a mutable reference to the value at a given index. #[inline] #[must_use] - pub fn get_mut(&mut self, index: usize) -> Option<&'a mut A> { + fn get_mut(&mut self, index: usize) -> Option<&mut A> { if index >= self.len() { None } else { - self.buffer - .get_mut(self.range.start + index) - .map(|r| unsafe { &mut *(r as *mut _) }) + Some(unsafe { self.get_unchecked_mut(index) }) } } +} - /// Get a reference to the first value in the slice. - #[inline] - #[must_use] - pub fn first(&self) -> Option<&A> { - self.get(0) - } - - /// Get a mutable reference to the first value in the slice. +impl<'a, A: 'a, N: ChunkLength + 'a> SliceMut<'a, A, N> { + /// Downgrade this slice into a non-mutable slice. #[inline] #[must_use] - pub fn first_mut(&mut self) -> Option<&mut A> { - self.get_mut(0) + pub fn unmut(self) -> Slice<'a, A, N> { + Slice { + buffer: self.buffer, + range: self.range, + } } - /// Get a reference to the last value in the slice. - #[inline] + /// Get an unchecked reference to the value at the given index. + /// + /// # Safety + /// + /// You must ensure the index is not out of bounds. #[must_use] - pub fn last(&self) -> Option<&A> { - if self.is_empty() { - None - } else { - self.get(self.len() - 1) - } + pub unsafe fn get_unchecked(&self, index: usize) -> &A { + self.buffer.get_unchecked(self.range.start + index) } - /// Get a mutable reference to the last value in the slice. - #[inline] + /// Get an unchecked mutable reference to the value at the given index. 
+ /// + /// # Safety + /// + /// You must ensure the index is not out of bounds. #[must_use] - pub fn last_mut(&mut self) -> Option<&mut A> { - if self.is_empty() { - None - } else { - self.get_mut(self.len() - 1) - } + pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A { + self.buffer.get_unchecked_mut(self.range.start + index) } /// Get an iterator over references to the items in the slice in order. @@ -356,14 +345,11 @@ #[inline] #[must_use] pub fn iter_mut(&mut self) -> IterMut<'_, A, N> { - let origin = self.buffer.origin; - let len = self.len(); - IterMut { - buffer: self.buffer, - left_index: origin + self.range.start, - right_index: origin + self.range.start + len, - remaining: len, - } + IterMut::new_slice( + self.buffer, + self.buffer.origin + self.range.start, + self.len(), + ) } /// Create a subslice of this slice. @@ -423,19 +409,6 @@ ) } - /// Update the value at index `index`, returning the old value. - /// - /// Panics if `index` is out of bounds. - #[inline] - #[must_use] - pub fn set(&mut self, index: usize, value: A) -> A { - if index >= self.len() { - panic!("SliceMut::set: index out of bounds"); - } else { - self.buffer.set(self.range.start + index, value) - } - } - /// Construct a new `RingBuffer` by copying the elements in this slice. #[inline] #[must_use] @@ -493,6 +466,26 @@ self.len() == other.len() && self.iter().eq(other.iter()) } } + +impl<'a, A: PartialEq + 'a, N: ChunkLength + 'a> PartialEq> + for SliceMut<'a, A, N> +{ + #[inline] + #[must_use] + fn eq(&self, other: &Slice<'a, A, N>) -> bool { + self.len() == other.len() && self.iter().eq(other.iter()) + } +} + +impl<'a, A: PartialEq + 'a, N: ChunkLength + 'a> PartialEq> + for SliceMut<'a, A, N> +{ + #[inline] + #[must_use] + fn eq(&self, other: &RingBuffer) -> bool { + self.len() == other.len() && self.iter().eq(other.iter()) + } +} impl<'a, A: PartialEq + 'a, N: ChunkLength + 'a, S> PartialEq for SliceMut<'a, A, N> where diff -Nru cargo-0.44.1/vendor/sized-chunks/src/sized_chunk/iter.rs cargo-0.47.0/vendor/sized-chunks/src/sized_chunk/iter.rs --- cargo-0.44.1/vendor/sized-chunks/src/sized_chunk/iter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/sized_chunk/iter.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use std::iter::FusedIterator; +use core::iter::FusedIterator; use super::Chunk; use crate::types::ChunkLength; diff -Nru cargo-0.44.1/vendor/sized-chunks/src/sized_chunk/mod.rs cargo-0.47.0/vendor/sized-chunks/src/sized_chunk/mod.rs --- cargo-0.44.1/vendor/sized-chunks/src/sized_chunk/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/sized_chunk/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -7,19 +7,21 @@ //! 
See [`Chunk`](struct.Chunk.html) use crate::inline_array::InlineArray; -use std::borrow::{Borrow, BorrowMut}; -use std::cmp::Ordering; -use std::fmt::{Debug, Error, Formatter}; -use std::hash::{Hash, Hasher}; -use std::io; -use std::iter::FromIterator; -use std::mem::{replace, MaybeUninit}; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::ptr; -use std::slice::{ +use core::borrow::{Borrow, BorrowMut}; +use core::cmp::Ordering; +use core::fmt::{Debug, Error, Formatter}; +use core::hash::{Hash, Hasher}; +use core::iter::FromIterator; +use core::mem::{replace, MaybeUninit}; +use core::ops::{Deref, DerefMut, Index, IndexMut}; +use core::ptr; +use core::slice::{ from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut, SliceIndex, }; +#[cfg(feature = "std")] +use std::io; + use typenum::U64; use crate::types::ChunkLength; @@ -247,7 +249,7 @@ /// Test if the chunk is at capacity. #[inline] pub fn is_full(&self) -> bool { - self.left == 0 && self.right == N::USIZE + self.left == 0 && self.right == Self::CAPACITY } #[inline] @@ -769,6 +771,7 @@ } } +#[cfg(feature = "std")] impl io::Write for Chunk where N: ChunkLength, @@ -784,6 +787,7 @@ } } +#[cfg(feature = "std")] impl> std::io::Read for Chunk { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { let read_size = buf.len().min(self.len()); diff -Nru cargo-0.44.1/vendor/sized-chunks/src/sized_chunk/refpool.rs cargo-0.47.0/vendor/sized-chunks/src/sized_chunk/refpool.rs --- cargo-0.44.1/vendor/sized-chunks/src/sized_chunk/refpool.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/sized_chunk/refpool.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use std::mem::MaybeUninit; +use core::mem::MaybeUninit; use ::refpool::{PoolClone, PoolDefault}; @@ -53,7 +53,7 @@ chunk.push_back(2); chunk.push_back(3); } - let ref2 = ref1.cloned(&pool); + let ref2 = PoolRef::cloned(&pool, &ref1); let ref3 = PoolRef::clone_from(&pool, &Chunk::from_iter(1..=3)); assert_eq!(Chunk::::from_iter(1..=3), *ref1); assert_eq!(Chunk::::from_iter(1..=3), *ref2); diff -Nru cargo-0.44.1/vendor/sized-chunks/src/sparse_chunk/iter.rs cargo-0.47.0/vendor/sized-chunks/src/sparse_chunk/iter.rs --- cargo-0.44.1/vendor/sized-chunks/src/sparse_chunk/iter.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/sparse_chunk/iter.rs 2020-10-01 21:38:28.000000000 +0000 @@ -168,47 +168,78 @@ #[cfg(test)] mod test { use super::*; - use proptest::{collection::vec, num::usize, option::of, prop_assert, proptest}; + use std::iter::FromIterator; use typenum::U64; - proptest! 
{ - #[test] - fn iter(ref vec in vec(of(usize::ANY), 0..64)) { - let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); - let vec: Vec = vec.iter().cloned().filter(|v| v.is_some()).map(|v| v.unwrap()).collect(); - prop_assert!(vec.iter().eq(chunk.iter())); - } - - #[test] - fn iter_mut(ref vec in vec(of(usize::ANY), 0..64)) { - let mut chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); - let mut vec: Vec = vec.iter().cloned().filter(|v| v.is_some()).map(|v| v.unwrap()).collect(); - prop_assert!(vec.iter_mut().eq(chunk.iter_mut())); - } - - #[test] - fn drain(ref vec in vec(of(usize::ANY), 0..64)) { - let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); - let vec: Vec = vec.iter().cloned().filter(|v| v.is_some()).map(|v| v.unwrap()).collect(); - prop_assert!(vec.into_iter().eq(chunk.into_iter())); - } - - #[test] - fn option_iter(ref vec in vec(of(usize::ANY), 64)) { - let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); - prop_assert!(vec.iter().cloned().eq(chunk.option_iter().map(|v| v.cloned()))); - } - - #[test] - fn option_iter_mut(ref vec in vec(of(usize::ANY), 64)) { - let mut chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); - prop_assert!(vec.iter().cloned().eq(chunk.option_iter_mut().map(|v| v.cloned()))); - } - - #[test] - fn option_drain(ref vec in vec(of(usize::ANY), 64)) { - let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); - prop_assert!(vec.iter().cloned().eq(chunk.option_drain())); - } + #[test] + fn iter() { + let vec: Vec> = + Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); + let chunk: SparseChunk = vec.iter().cloned().collect(); + let vec: Vec = vec + .iter() + .cloned() + .filter(|v| v.is_some()) + .map(|v| v.unwrap()) + .collect(); + assert!(vec.iter().eq(chunk.iter())); + } + + #[test] + fn iter_mut() { + let vec: Vec> = + Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); + let mut chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); + let mut vec: Vec = vec + .iter() + .cloned() + .filter(|v| v.is_some()) + .map(|v| v.unwrap()) + .collect(); + assert!(vec.iter_mut().eq(chunk.iter_mut())); + } + + #[test] + fn drain() { + let vec: Vec> = + Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); + let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); + let vec: Vec = vec + .iter() + .cloned() + .filter(|v| v.is_some()) + .map(|v| v.unwrap()) + .collect(); + assert!(vec.into_iter().eq(chunk.into_iter())); + } + + #[test] + fn option_iter() { + let vec: Vec> = + Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); + let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); + assert!(vec + .iter() + .cloned() + .eq(chunk.option_iter().map(|v| v.cloned()))); + } + + #[test] + fn option_iter_mut() { + let vec: Vec> = + Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); + let mut chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); + assert!(vec + .iter() + .cloned() + .eq(chunk.option_iter_mut().map(|v| v.cloned()))); + } + + #[test] + fn option_drain() { + let vec: Vec> = + Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); + let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect(); + assert!(vec.iter().cloned().eq(chunk.option_drain())); } } diff -Nru cargo-0.44.1/vendor/sized-chunks/src/sparse_chunk/mod.rs cargo-0.47.0/vendor/sized-chunks/src/sparse_chunk/mod.rs --- cargo-0.44.1/vendor/sized-chunks/src/sparse_chunk/mod.rs 2020-05-27 
21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/sparse_chunk/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,14 +6,16 @@ //! //! See [`SparseChunk`](struct.SparseChunk.html) +use core::fmt::{Debug, Error, Formatter}; +use core::iter::FromIterator; +use core::mem::{self, MaybeUninit}; +use core::ops::Index; +use core::ops::IndexMut; +use core::ptr; +use core::slice::{from_raw_parts, from_raw_parts_mut}; + +#[cfg(feature = "std")] use std::collections::{BTreeMap, HashMap}; -use std::fmt::{Debug, Error, Formatter}; -use std::iter::FromIterator; -use std::mem::{self, MaybeUninit}; -use std::ops::Index; -use std::ops::IndexMut; -use std::ptr; -use std::slice::{from_raw_parts, from_raw_parts_mut}; use typenum::U64; @@ -200,7 +202,7 @@ return None; } if self.map.get(index) { - Some(&self.values()[index]) + Some(unsafe { self.get_unchecked(index) }) } else { None } @@ -212,12 +214,32 @@ return None; } if self.map.get(index) { - Some(&mut self.values_mut()[index]) + Some(unsafe { self.get_unchecked_mut(index) }) } else { None } } + /// Get an unchecked reference to the value at a given index. + /// + /// # Safety + /// + /// Uninhabited indices contain uninitialised data, so make sure you validate + /// the index before using this method. + pub unsafe fn get_unchecked(&self, index: usize) -> &A { + self.values().get_unchecked(index) + } + + /// Get an unchecked mutable reference to the value at a given index. + /// + /// # Safety + /// + /// Uninhabited indices contain uninitialised data, so make sure you validate + /// the index before using this method. + pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A { + self.values_mut().get_unchecked_mut(index) + } + /// Make an iterator over the indices which contain values. pub fn indices(&self) -> BitmapIter<'_, N> { self.map.into_iter() @@ -355,6 +377,7 @@ } } +#[cfg(feature = "std")] impl PartialEq> for SparseChunk where A: PartialEq, @@ -373,6 +396,7 @@ } } +#[cfg(feature = "std")] impl PartialEq> for SparseChunk where A: PartialEq, diff -Nru cargo-0.44.1/vendor/sized-chunks/src/sparse_chunk/refpool.rs cargo-0.47.0/vendor/sized-chunks/src/sparse_chunk/refpool.rs --- cargo-0.44.1/vendor/sized-chunks/src/sparse_chunk/refpool.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/sparse_chunk/refpool.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,4 @@ -use std::mem::MaybeUninit; +use core::mem::MaybeUninit; use bitmaps::{Bitmap, Bits}; @@ -50,7 +50,7 @@ chunk.insert(10, 37); chunk.insert(31, 337); } - let ref2 = ref1.cloned(&pool); + let ref2 = PoolRef::cloned(&pool, &ref1); assert_eq!(ref1, ref2); assert!(!PoolRef::ptr_eq(&ref1, &ref2)); } diff -Nru cargo-0.44.1/vendor/sized-chunks/src/tests/inline_array.rs cargo-0.47.0/vendor/sized-chunks/src/tests/inline_array.rs --- cargo-0.44.1/vendor/sized-chunks/src/tests/inline_array.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/tests/inline_array.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -#![allow(clippy::unit_arg)] - -use std::panic::{catch_unwind, AssertUnwindSafe}; - -use proptest::{arbitrary::any, collection::vec, prelude::*, proptest}; -use proptest_derive::Arbitrary; - -use crate::inline_array::InlineArray; - -#[test] -fn validity_invariant() { - assert!(Some(InlineArray::; 2]>::new()).is_some()); - - let mut chunk = InlineArray::; 2]>::new(); - chunk.push(0); - assert!(Some(chunk).is_some()); -} - -type TestType = [usize; 16]; - -#[derive(Arbitrary, Debug)] -enum Action -where - A: Arbitrary, - 
::Strategy: 'static, -{ - Push(A), - Pop, - Set((usize, A)), - Insert(usize, A), - Remove(usize), - SplitOff(usize), - Drain, - Clear, -} - -proptest! { - #[test] - fn test_actions(actions in vec(any::>(), 0..super::action_count())) { - let capacity = InlineArray::::CAPACITY; - let mut chunk = InlineArray::::new(); - let mut guide: Vec<_> = chunk.iter().cloned().collect(); - for action in actions { - match action { - Action::Push(value) => { - if chunk.is_full() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.push(value))).is_err()); - } else { - chunk.push(value); - guide.push(value); - } - } - Action::Pop => { - assert_eq!(chunk.pop(), guide.pop()); - } - Action::Set((index, value)) => { - if index >= chunk.len() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk[index] = value)).is_err()); - } else { - chunk[index] = value; - guide[index] = value; - } - } - Action::Insert(index, value) => { - if index > chunk.len() || chunk.is_full() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.insert(index, value))).is_err()); - } else { - chunk.insert(index, value); - guide.insert(index, value); - } - } - Action::Remove(index) => { - if index >= chunk.len() { - assert_eq!(None, chunk.remove(index)); - } else { - assert_eq!(chunk.remove(index), Some(guide.remove(index))); - } - } - Action::SplitOff(index) => { - if index > chunk.len() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.split_off(index))).is_err()); - } else { - let chunk_off = chunk.split_off(index); - let guide_off = guide.split_off(index); - assert_eq!(chunk_off, guide_off); - } - } - Action::Drain => { - let drained: Vec<_> = chunk.drain().collect(); - let drained_guide: Vec<_> = guide.drain(..).collect(); - assert_eq!(drained, drained_guide); - } - Action::Clear => { - chunk.clear(); - guide.clear(); - } - } - assert_eq!(chunk, guide); - assert!(guide.len() <= capacity); - } - } -} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/tests/mod.rs cargo-0.47.0/vendor/sized-chunks/src/tests/mod.rs --- cargo-0.44.1/vendor/sized-chunks/src/tests/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/tests/mod.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -mod inline_array; -mod ring_buffer; -mod sized_chunk; -mod sparse_chunk; - -pub(crate) fn action_count() -> usize { - std::env::var("ACTION_COUNT") - .ok() - .and_then(|s| s.parse().ok()) - .unwrap_or(100) -} - -pub(crate) struct DropTest<'a> { - counter: &'a AtomicUsize, -} - -impl<'a> DropTest<'a> { - pub(crate) fn new(counter: &'a AtomicUsize) -> Self { - counter.fetch_add(1, Ordering::Relaxed); - DropTest { counter } - } -} - -impl<'a> Drop for DropTest<'a> { - fn drop(&mut self) { - self.counter.fetch_sub(1, Ordering::Relaxed); - } -} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/tests/ring_buffer.rs cargo-0.47.0/vendor/sized-chunks/src/tests/ring_buffer.rs --- cargo-0.44.1/vendor/sized-chunks/src/tests/ring_buffer.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/tests/ring_buffer.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,296 +0,0 @@ -#![allow(clippy::unit_arg)] - -use std::fmt::Debug; -use std::iter::FromIterator; -use std::panic::{catch_unwind, AssertUnwindSafe}; - -use proptest::{arbitrary::any, collection::vec, prelude::*, proptest}; -use proptest_derive::Arbitrary; - -use crate::ring_buffer::RingBuffer; - -#[test] -fn validity_invariant() { - assert!(Some(RingBuffer::>::new()).is_some()); -} - -#[derive(Debug)] -struct InputVec(Vec); - -impl InputVec 
{ - fn unwrap(self) -> Vec { - self.0 - } -} - -impl Arbitrary for InputVec -where - A: Arbitrary + Debug, - ::Strategy: 'static, -{ - type Parameters = usize; - type Strategy = BoxedStrategy>; - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - #[allow(clippy::redundant_closure)] - proptest::collection::vec(any::(), 0..RingBuffer::::CAPACITY) - .prop_map(|v| InputVec(v)) - .boxed() - } -} - -#[derive(Arbitrary, Debug)] -enum Construct -where - A: Arbitrary, - ::Strategy: 'static, -{ - Empty, - Single(A), - Pair((A, A)), - DrainFrom(InputVec), - CollectFrom(InputVec, usize), - FromFront(InputVec, usize), - FromBack(InputVec, usize), - FromIter(InputVec), -} - -#[derive(Arbitrary, Debug)] -enum Action -where - A: Arbitrary, - ::Strategy: 'static, -{ - PushFront(A), - PushBack(A), - PopFront, - PopBack, - DropLeft(usize), - DropRight(usize), - SplitOff(usize), - Append(Construct), - DrainFromFront(Construct, usize), - DrainFromBack(Construct, usize), - Set(usize, A), - Insert(usize, A), - Remove(usize), - Drain, - Clear, -} - -impl Construct -where - A: Arbitrary + Clone + Debug + Eq, - ::Strategy: 'static, -{ - fn make(self) -> RingBuffer { - match self { - Construct::Empty => { - let out = RingBuffer::new(); - assert!(out.is_empty()); - out - } - Construct::Single(value) => { - let out = RingBuffer::unit(value.clone()); - assert_eq!(out, vec![value]); - out - } - Construct::Pair((left, right)) => { - let out = RingBuffer::pair(left.clone(), right.clone()); - assert_eq!(out, vec![left, right]); - out - } - Construct::DrainFrom(vec) => { - let vec = vec.unwrap(); - let mut source = RingBuffer::from_iter(vec.iter().cloned()); - let out = RingBuffer::drain_from(&mut source); - assert!(source.is_empty()); - assert_eq!(out, vec); - out - } - Construct::CollectFrom(vec, len) => { - let mut vec = vec.unwrap(); - if vec.is_empty() { - return RingBuffer::new(); - } - let len = len % vec.len(); - let mut source = vec.clone().into_iter(); - let out = RingBuffer::collect_from(&mut source, len); - let expected_remainder = vec.split_off(len); - let remainder: Vec<_> = source.collect(); - assert_eq!(expected_remainder, remainder); - assert_eq!(out, vec); - out - } - Construct::FromFront(vec, len) => { - let mut vec = vec.unwrap(); - if vec.is_empty() { - return RingBuffer::new(); - } - let len = len % vec.len(); - let mut source = RingBuffer::from_iter(vec.iter().cloned()); - let out = RingBuffer::from_front(&mut source, len); - let remainder = vec.split_off(len); - assert_eq!(source, remainder); - assert_eq!(out, vec); - out - } - Construct::FromBack(vec, len) => { - let mut vec = vec.unwrap(); - if vec.is_empty() { - return RingBuffer::new(); - } - let len = len % vec.len(); - let mut source = RingBuffer::from_iter(vec.iter().cloned()); - let out = RingBuffer::from_back(&mut source, len); - let remainder = vec.split_off(vec.len() - len); - assert_eq!(out, remainder); - assert_eq!(source, vec); - out - } - Construct::FromIter(vec) => { - let vec = vec.unwrap(); - let out = vec.clone().into_iter().collect(); - assert_eq!(out, vec); - out - } - } - } -} - -proptest! 
{ - #[test] - fn test_constructors(cons: Construct) { - cons.make(); - } - - #[test] - fn test_actions(cons: Construct, actions in vec(any::>(), 0..super::action_count())) { - let capacity = RingBuffer::::CAPACITY; - let mut chunk = cons.make(); - let mut guide: Vec<_> = chunk.iter().cloned().collect(); - println!("{:?}", actions); - for action in actions { - println!("Executing {:?} on {:?}", action, chunk); - match action { - Action::PushFront(value) => { - if chunk.is_full() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.push_front(value))).is_err()); - } else { - chunk.push_front(value); - guide.insert(0, value); - } - } - Action::PushBack(value) => { - if chunk.is_full() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.push_back(value))).is_err()); - } else { - chunk.push_back(value); - guide.push(value); - } - } - Action::PopFront => { - assert_eq!(chunk.pop_front(), if guide.is_empty() { - None - } else { - Some(guide.remove(0)) - }); - } - Action::PopBack => { - assert_eq!(chunk.pop_back(), guide.pop()); - } - Action::DropLeft(index) => { - if index > chunk.len() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.drop_left(index))).is_err()); - } else { - chunk.drop_left(index); - guide.drain(..index); - } - } - Action::DropRight(index) => { - if index > chunk.len() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.drop_right(index))).is_err()); - } else { - chunk.drop_right(index); - guide.drain(index..); - } - } - Action::SplitOff(index) => { - if index > chunk.len() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.split_off(index))).is_err()); - } else { - let chunk_off = chunk.split_off(index); - let guide_off = guide.split_off(index); - assert_eq!(chunk_off, guide_off); - } - } - Action::Append(other) => { - let mut other = other.make(); - let mut other_guide: Vec<_> = other.iter().cloned().collect(); - if other.len() + chunk.len() > capacity { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.append(&mut other))).is_err()); - } else { - chunk.append(&mut other); - guide.append(&mut other_guide); - } - } - Action::DrainFromFront(other, count) => { - let mut other = other.make(); - let mut other_guide: Vec<_> = other.iter().cloned().collect(); - if count > other.len() || chunk.len() + count > capacity { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.drain_from_front(&mut other, count))).is_err()); - } else { - chunk.drain_from_front(&mut other, count); - guide.extend(other_guide.drain(..count)); - assert_eq!(other, other_guide); - } - } - Action::DrainFromBack(other, count) => { - let mut other = other.make(); - let mut other_guide: Vec<_> = other.iter().cloned().collect(); - if count > other.len() || chunk.len() + count > capacity { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.drain_from_back(&mut other, count))).is_err()); - } else { - let other_index = other.len() - count; - chunk.drain_from_back(&mut other, count); - guide = other_guide.drain(other_index..).chain(guide.into_iter()).collect(); - assert_eq!(other, other_guide); - } - } - Action::Set(index, value) => { - if index >= chunk.len() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.set(index, value))).is_err()); - } else { - chunk.set(index, value); - guide[index] = value; - } - } - Action::Insert(index, value) => { - if index > chunk.len() || chunk.is_full() { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.insert(index, value))).is_err()); - } else { - chunk.insert(index, value); - guide.insert(index, value); - } - } - Action::Remove(index) => { - if index >= chunk.len() { - 
assert!(catch_unwind(AssertUnwindSafe(|| chunk.remove(index))).is_err()); - } else { - assert_eq!(chunk.remove(index), guide.remove(index)); - } - } - Action::Drain => { - let drained: Vec<_> = chunk.drain().collect(); - let drained_guide: Vec<_> = guide.drain(..).collect(); - assert_eq!(drained, drained_guide); - } - Action::Clear => { - chunk.clear(); - guide.clear(); - } - } - assert_eq!(chunk, guide); - assert!(guide.len() <= capacity); - } - } -} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/tests/sized_chunk.rs cargo-0.47.0/vendor/sized-chunks/src/tests/sized_chunk.rs --- cargo-0.44.1/vendor/sized-chunks/src/tests/sized_chunk.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/tests/sized_chunk.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,353 +0,0 @@ -#![allow(clippy::unit_arg)] - -use std::fmt::Debug; -use std::iter::FromIterator; -use std::panic::{catch_unwind, AssertUnwindSafe}; - -use proptest::{arbitrary::any, collection::vec, prelude::*, proptest}; -use proptest_derive::Arbitrary; - -use crate::sized_chunk::Chunk; - -#[test] -fn validity_invariant() { - assert!(Some(Chunk::>::new()).is_some()); -} - -#[derive(Debug)] -struct InputVec(Vec); - -impl InputVec { - fn unwrap(self) -> Vec { - self.0 - } -} - -impl Arbitrary for InputVec -where - A: Arbitrary + Debug, - ::Strategy: 'static, -{ - type Parameters = usize; - type Strategy = BoxedStrategy>; - fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { - #[allow(clippy::redundant_closure)] - proptest::collection::vec(any::(), 0..Chunk::::CAPACITY) - .prop_map(|v| InputVec(v)) - .boxed() - } -} - -#[derive(Arbitrary, Debug)] -enum Construct -where - A: Arbitrary, - ::Strategy: 'static, -{ - Empty, - Single(A), - Pair((A, A)), - DrainFrom(InputVec), - CollectFrom(InputVec, usize), - FromFront(InputVec, usize), - FromBack(InputVec, usize), -} - -#[derive(Arbitrary, Debug)] -enum Action -where - A: Arbitrary, - ::Strategy: 'static, -{ - PushFront(A), - PushBack(A), - PopFront, - PopBack, - DropLeft(usize), - DropRight(usize), - SplitOff(usize), - Append(Construct), - DrainFromFront(Construct, usize), - DrainFromBack(Construct, usize), - Set(usize, A), - Insert(usize, A), - InsertFrom(Vec, usize), - Remove(usize), - Drain, - Clear, -} - -impl Construct -where - A: Arbitrary + Clone + Debug + Eq, - ::Strategy: 'static, -{ - fn make(self) -> Chunk { - match self { - Construct::Empty => { - let out = Chunk::new(); - assert!(out.is_empty()); - out - } - Construct::Single(value) => { - let out = Chunk::unit(value.clone()); - assert_eq!(out, vec![value]); - out - } - Construct::Pair((left, right)) => { - let out = Chunk::pair(left.clone(), right.clone()); - assert_eq!(out, vec![left, right]); - out - } - Construct::DrainFrom(vec) => { - let vec = vec.unwrap(); - let mut source = Chunk::from_iter(vec.iter().cloned()); - let out = Chunk::drain_from(&mut source); - assert!(source.is_empty()); - assert_eq!(out, vec); - out - } - Construct::CollectFrom(vec, len) => { - let mut vec = vec.unwrap(); - if vec.is_empty() { - return Chunk::new(); - } - let len = len % vec.len(); - let mut source = vec.clone().into_iter(); - let out = Chunk::collect_from(&mut source, len); - let expected_remainder = vec.split_off(len); - let remainder: Vec<_> = source.collect(); - assert_eq!(expected_remainder, remainder); - assert_eq!(out, vec); - out - } - Construct::FromFront(vec, len) => { - let mut vec = vec.unwrap(); - if vec.is_empty() { - return Chunk::new(); - } - let len = len % vec.len(); - let mut source = 
Chunk::from_iter(vec.iter().cloned()); - let out = Chunk::from_front(&mut source, len); - let remainder = vec.split_off(len); - assert_eq!(source, remainder); - assert_eq!(out, vec); - out - } - Construct::FromBack(vec, len) => { - let mut vec = vec.unwrap(); - if vec.is_empty() { - return Chunk::new(); - } - let len = len % vec.len(); - let mut source = Chunk::from_iter(vec.iter().cloned()); - let out = Chunk::from_back(&mut source, len); - let remainder = vec.split_off(vec.len() - len); - assert_eq!(out, remainder); - assert_eq!(source, vec); - out - } - } - } -} - -fn assert_panic(f: F) -where - F: FnOnce() -> A, -{ - let result = catch_unwind(AssertUnwindSafe(f)); - assert!( - result.is_err(), - "action that should have panicked didn't panic" - ); -} - -proptest! { - #[test] - fn test_constructors(cons: Construct) { - cons.make(); - } - - #[test] - fn test_actions(cons: Construct, actions in vec(any::>(), 0..super::action_count())) { - let capacity = Chunk::::CAPACITY; - let mut chunk = cons.make(); - let mut guide: Vec<_> = chunk.iter().cloned().collect(); - for action in actions { - match action { - Action::PushFront(value) => { - if chunk.is_full() { - assert_panic(|| chunk.push_front(value)); - } else { - chunk.push_front(value); - guide.insert(0, value); - } - } - Action::PushBack(value) => { - if chunk.is_full() { - assert_panic(|| chunk.push_back(value)); - } else { - chunk.push_back(value); - guide.push(value); - } - } - Action::PopFront => { - if chunk.is_empty() { - assert_panic(|| chunk.pop_front()); - } else { - assert_eq!(chunk.pop_front(), guide.remove(0)); - } - } - Action::PopBack => { - if chunk.is_empty() { - assert_panic(|| chunk.pop_back()); - } else { - assert_eq!(chunk.pop_back(), guide.pop().unwrap()); - } - } - Action::DropLeft(index) => { - if index > chunk.len() { - assert_panic(|| chunk.drop_left(index)); - } else { - chunk.drop_left(index); - guide.drain(..index); - } - } - Action::DropRight(index) => { - if index > chunk.len() { - assert_panic(|| chunk.drop_right(index)); - } else { - chunk.drop_right(index); - guide.drain(index..); - } - } - Action::SplitOff(index) => { - if index > chunk.len() { - assert_panic(|| chunk.split_off(index)); - } else { - let chunk_off = chunk.split_off(index); - let guide_off = guide.split_off(index); - assert_eq!(chunk_off, guide_off); - } - } - Action::Append(other) => { - let mut other = other.make(); - let mut other_guide: Vec<_> = other.iter().cloned().collect(); - if other.len() + chunk.len() > capacity { - assert_panic(|| chunk.append(&mut other)); - } else { - chunk.append(&mut other); - guide.append(&mut other_guide); - } - } - Action::DrainFromFront(other, count) => { - let mut other = other.make(); - let mut other_guide: Vec<_> = other.iter().cloned().collect(); - if count > other.len() || chunk.len() + count > capacity { - assert_panic(|| chunk.drain_from_front(&mut other, count)); - } else { - chunk.drain_from_front(&mut other, count); - guide.extend(other_guide.drain(..count)); - assert_eq!(other, other_guide); - } - } - Action::DrainFromBack(other, count) => { - let mut other = other.make(); - let mut other_guide: Vec<_> = other.iter().cloned().collect(); - if count > other.len() || chunk.len() + count > capacity { - assert_panic(|| chunk.drain_from_back(&mut other, count)); - } else { - let other_index = other.len() - count; - chunk.drain_from_back(&mut other, count); - guide = other_guide - .drain(other_index..) 
- .chain(guide.into_iter()) - .collect(); - assert_eq!(other, other_guide); - } - } - Action::Set(index, value) => { - if index >= chunk.len() { - assert_panic(|| chunk.set(index, value)); - } else { - chunk.set(index, value); - guide[index] = value; - } - } - Action::Insert(index, value) => { - if index > chunk.len() || chunk.is_full() { - assert_panic(|| chunk.insert(index, value)); - } else { - chunk.insert(index, value); - guide.insert(index, value); - } - } - Action::InsertFrom(values, index) => { - if index > chunk.len() || chunk.len() + values.len() > capacity { - assert_panic(|| chunk.insert_from(index, values)); - } else { - chunk.insert_from(index, values.clone()); - for value in values.into_iter().rev() { - guide.insert(index, value); - } - } - } - Action::Remove(index) => { - if index >= chunk.len() { - assert_panic(|| chunk.remove(index)); - } else { - assert_eq!(chunk.remove(index), guide.remove(index)); - } - } - Action::Drain => { - let drained: Vec<_> = chunk.drain().collect(); - let drained_guide: Vec<_> = guide.drain(..).collect(); - assert_eq!(drained, drained_guide); - } - Action::Clear => { - chunk.clear(); - guide.clear(); - } - } - assert_eq!(chunk, guide); - assert!(guide.len() <= capacity); - } - } -} - -#[cfg(feature = "refpool")] -mod refpool_test { - use super::*; - use refpool::{Pool, PoolRef}; - - #[test] - fn stress_test() { - let pool_size = 1024; - let allocs = 2048; - - let pool: Pool> = Pool::new(pool_size); - pool.fill(); - - for _ in 0..8 { - let mut store = Vec::new(); - for _ in 0..allocs { - store.push(PoolRef::default(&pool)); - } - for chunk in &mut store { - let chunk = PoolRef::make_mut(&pool, chunk); - for _ in 0..32 { - chunk.push_front(1); - chunk.push_back(2); - } - } - let mut expected: Chunk = Chunk::new(); - for _ in 0..32 { - expected.push_back(2); - expected.push_front(1); - } - for chunk in &store { - assert_eq!(expected, **chunk); - } - } - } -} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/tests/sparse_chunk.rs cargo-0.47.0/vendor/sized-chunks/src/tests/sparse_chunk.rs --- cargo-0.44.1/vendor/sized-chunks/src/tests/sparse_chunk.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/tests/sparse_chunk.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -#![allow(clippy::unit_arg)] - -use std::collections::BTreeMap; -use std::fmt::Debug; -use std::panic::{catch_unwind, AssertUnwindSafe}; - -use proptest::{arbitrary::any, collection::vec, prelude::*, proptest}; -use proptest_derive::Arbitrary; - -use crate::sparse_chunk::SparseChunk; - -#[test] -fn validity_invariant() { - assert!(Some(SparseChunk::>::new()).is_some()); -} - -#[derive(Arbitrary, Debug)] -enum Construct { - Empty, - Single((usize, A)), - Pair((usize, A, usize, A)), -} - -#[derive(Arbitrary, Debug)] -enum Action { - Insert(usize, A), - Remove(usize), - Pop, -} - -impl Construct -where - A: Arbitrary + Clone + Debug + Eq, - ::Strategy: 'static, -{ - fn make(self) -> SparseChunk { - match self { - Construct::Empty => { - let out = SparseChunk::new(); - assert!(out.is_empty()); - out - } - Construct::Single((index, value)) => { - let index = index % SparseChunk::::CAPACITY; - let out = SparseChunk::unit(index, value.clone()); - let mut guide = BTreeMap::new(); - guide.insert(index, value); - assert_eq!(out, guide); - out - } - Construct::Pair((left_index, left, right_index, right)) => { - let left_index = left_index % SparseChunk::::CAPACITY; - let right_index = right_index % SparseChunk::::CAPACITY; - let out = SparseChunk::pair(left_index, 
left.clone(), right_index, right.clone()); - let mut guide = BTreeMap::new(); - guide.insert(left_index, left); - guide.insert(right_index, right); - assert_eq!(out, guide); - out - } - } - } -} - -proptest! { - #[test] - fn test_constructors(cons: Construct) { - cons.make(); - } - - #[test] - fn test_actions(cons: Construct, actions in vec(any::>(), 0..super::action_count())) { - let capacity = SparseChunk::::CAPACITY; - let mut chunk = cons.make(); - let mut guide: BTreeMap<_, _> = chunk.entries().map(|(i, v)| (i, *v)).collect(); - for action in actions { - match action { - Action::Insert(index, value) => { - if index >= capacity { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.insert(index, value))).is_err()); - } else { - assert_eq!(chunk.insert(index, value), guide.insert(index, value)); - } - } - Action::Remove(index) => { - if index >= capacity { - assert!(catch_unwind(AssertUnwindSafe(|| chunk.remove(index))).is_err()); - } else { - assert_eq!(chunk.remove(index), guide.remove(&index)); - } - } - Action::Pop => { - if let Some(index) = chunk.first_index() { - assert_eq!(chunk.pop(), guide.remove(&index)); - } else { - assert_eq!(chunk.pop(), None); - } - } - } - assert_eq!(chunk, guide); - assert!(guide.len() <= SparseChunk::::CAPACITY); - } - } -} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/tests.rs cargo-0.47.0/vendor/sized-chunks/src/tests.rs --- cargo-0.44.1/vendor/sized-chunks/src/tests.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/tests.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,18 @@ +use std::sync::atomic::{AtomicUsize, Ordering}; + +pub(crate) struct DropTest<'a> { + counter: &'a AtomicUsize, +} + +impl<'a> DropTest<'a> { + pub(crate) fn new(counter: &'a AtomicUsize) -> Self { + counter.fetch_add(1, Ordering::Relaxed); + DropTest { counter } + } +} + +impl<'a> Drop for DropTest<'a> { + fn drop(&mut self) { + self.counter.fetch_sub(1, Ordering::Relaxed); + } +} diff -Nru cargo-0.44.1/vendor/sized-chunks/src/types.rs cargo-0.47.0/vendor/sized-chunks/src/types.rs --- cargo-0.44.1/vendor/sized-chunks/src/types.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/sized-chunks/src/types.rs 2020-10-01 21:38:28.000000000 +0000 @@ -4,7 +4,7 @@ //! Helper types for chunks. 
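The replacement src/tests.rs above keeps only the DropTest guard: it bumps an AtomicUsize when constructed and decrements it again in Drop, so a test can check that a container releases every value it owns. A minimal, self-contained sketch of that pattern; the DropTest shape matches the diff above, while the surrounding harness is illustrative only:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Same shape as the DropTest helper added in src/tests.rs above.
    struct DropTest<'a> {
        counter: &'a AtomicUsize,
    }

    impl<'a> DropTest<'a> {
        fn new(counter: &'a AtomicUsize) -> Self {
            counter.fetch_add(1, Ordering::Relaxed); // one more live value
            DropTest { counter }
        }
    }

    impl<'a> Drop for DropTest<'a> {
        fn drop(&mut self) {
            self.counter.fetch_sub(1, Ordering::Relaxed); // one fewer live value
        }
    }

    fn main() {
        let live = AtomicUsize::new(0);
        {
            let _values: Vec<DropTest<'_>> = (0..3).map(|_| DropTest::new(&live)).collect();
            assert_eq!(live.load(Ordering::Relaxed), 3);
        } // the Vec is dropped here, taking every guard with it
        assert_eq!(live.load(Ordering::Relaxed), 0); // nothing leaked
    }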
-use std::marker::PhantomData; +use core::marker::PhantomData; use typenum::*; diff -Nru cargo-0.44.1/vendor/smallvec/benches/bench.rs cargo-0.47.0/vendor/smallvec/benches/bench.rs --- cargo-0.44.1/vendor/smallvec/benches/bench.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/benches/bench.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,295 +0,0 @@ -#![feature(test)] - -#[macro_use] -extern crate smallvec; -extern crate test; - -use self::test::Bencher; -use smallvec::{ExtendFromSlice, SmallVec}; - -const VEC_SIZE: usize = 16; -const SPILLED_SIZE: usize = 100; - -trait Vector: for<'a> From<&'a [T]> + Extend + ExtendFromSlice { - fn new() -> Self; - fn push(&mut self, val: T); - fn pop(&mut self) -> Option; - fn remove(&mut self, p: usize) -> T; - fn insert(&mut self, n: usize, val: T); - fn from_elem(val: T, n: usize) -> Self; - fn from_elems(val: &[T]) -> Self; -} - -impl Vector for Vec { - fn new() -> Self { - Self::with_capacity(VEC_SIZE) - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - vec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - val.to_owned() - } -} - -impl Vector for SmallVec<[T; VEC_SIZE]> { - fn new() -> Self { - Self::new() - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - smallvec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - SmallVec::from_slice(val) - } -} - -macro_rules! make_benches { - ($typ:ty { $($b_name:ident => $g_name:ident($($args:expr),*),)* }) => { - $( - #[bench] - fn $b_name(b: &mut Bencher) { - $g_name::<$typ>($($args,)* b) - } - )* - } -} - -make_benches! { - SmallVec<[u64; VEC_SIZE]> { - bench_push => gen_push(SPILLED_SIZE as _), - bench_push_small => gen_push(VEC_SIZE as _), - bench_insert => gen_insert(SPILLED_SIZE as _), - bench_insert_small => gen_insert(VEC_SIZE as _), - bench_remove => gen_remove(SPILLED_SIZE as _), - bench_remove_small => gen_remove(VEC_SIZE as _), - bench_extend => gen_extend(SPILLED_SIZE as _), - bench_extend_small => gen_extend(VEC_SIZE as _), - bench_from_iter => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop => gen_pushpop(), - } -} - -make_benches! 
{ - Vec { - bench_push_vec => gen_push(SPILLED_SIZE as _), - bench_push_vec_small => gen_push(VEC_SIZE as _), - bench_insert_vec => gen_insert(SPILLED_SIZE as _), - bench_insert_vec_small => gen_insert(VEC_SIZE as _), - bench_remove_vec => gen_remove(SPILLED_SIZE as _), - bench_remove_vec_small => gen_remove(VEC_SIZE as _), - bench_extend_vec => gen_extend(SPILLED_SIZE as _), - bench_extend_vec_small => gen_extend(VEC_SIZE as _), - bench_from_iter_vec => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_vec_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice_vec => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_vec_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice_vec => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_vec_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem_vec => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_vec_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop_vec => gen_pushpop(), - } -} - -fn gen_push>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn push_noinline>(vec: &mut V, x: u64) { - vec.push(x); - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..n { - push_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_insert>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn insert_noinline>(vec: &mut V, p: usize, x: u64) { - vec.insert(p, x) - } - - b.iter(|| { - let mut vec = V::new(); - // Add one element, with each iteration we insert one before the end. - // This means that we benchmark the insertion operation and not the - // time it takes to `ptr::copy` the data. - vec.push(0); - for x in 0..n { - insert_noinline(&mut vec, x as _, x); - } - vec - }); -} - -fn gen_remove>(n: usize, b: &mut Bencher) { - #[inline(never)] - fn remove_noinline>(vec: &mut V, p: usize) -> u64 { - vec.remove(p) - } - - b.iter(|| { - let mut vec = V::from_elem(0, n as _); - - for x in (0..n - 1).rev() { - remove_noinline(&mut vec, x); - } - }); -} - -fn gen_extend>(n: u64, b: &mut Bencher) { - b.iter(|| { - let mut vec = V::new(); - vec.extend(0..n); - vec - }); -} - -fn gen_from_iter>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from(&v); - vec - }); -} - -fn gen_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from_elems(&v); - vec - }); -} - -fn gen_extend_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let mut vec = V::new(); - vec.extend_from_slice(&v); - vec - }); -} - -fn gen_pushpop>(b: &mut Bencher) { - #[inline(never)] - fn pushpop_noinline>(vec: &mut V, x: u64) -> Option { - vec.push(x); - vec.pop() - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..SPILLED_SIZE as _ { - pushpop_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_from_elem>(n: usize, b: &mut Bencher) { - b.iter(|| { - let vec = V::from_elem(42, n); - vec - }); -} - -#[bench] -fn bench_insert_many(b: &mut Bencher) { - #[inline(never)] - fn insert_many_noinline>( - vec: &mut SmallVec<[u64; VEC_SIZE]>, - index: usize, - iterable: I, - ) { - vec.insert_many(index, iterable) - } - - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - vec - }); -} - -#[bench] -fn bench_insert_from_slice(b: &mut Bencher) { - let v: Vec = (0..SPILLED_SIZE as _).collect(); - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - vec.insert_from_slice(0, &v); - 
vec.insert_from_slice(0, &v); - vec - }); -} - -#[bench] -fn bench_macro_from_list(b: &mut Bencher) { - b.iter(|| { - let vec: SmallVec<[u64; 16]> = smallvec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} - -#[bench] -fn bench_macro_from_list_vec(b: &mut Bencher) { - b.iter(|| { - let vec: Vec = vec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} diff -Nru cargo-0.44.1/vendor/smallvec/.cargo-checksum.json cargo-0.47.0/vendor/smallvec/.cargo-checksum.json --- cargo-0.44.1/vendor/smallvec/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/.cargo-checksum.json 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -{"files":{},"package":"c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/smallvec/Cargo.toml cargo-0.47.0/vendor/smallvec/Cargo.toml --- cargo-0.44.1/vendor/smallvec/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "smallvec" -version = "1.4.0" -authors = ["Simon Sapin "] -description = "'Small vector' optimization: store up to a small number of items on the stack" -documentation = "https://docs.rs/smallvec/" -readme = "README.md" -keywords = ["small", "vec", "vector", "stack", "no_std"] -categories = ["data-structures"] -license = "MIT/Apache-2.0" -repository = "https://github.com/servo/rust-smallvec" - -[lib] -name = "smallvec" -path = "lib.rs" -[dependencies.serde] -version = "1" -optional = true -[dev-dependencies.bincode] -version = "1.0.1" - -[features] -const_generics = [] -may_dangle = [] -specialization = [] -union = [] -write = [] diff -Nru cargo-0.44.1/vendor/smallvec/lib.rs cargo-0.47.0/vendor/smallvec/lib.rs --- cargo-0.44.1/vendor/smallvec/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/lib.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2669 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Small vectors in various sizes. These store a certain number of elements inline, and fall back -//! to the heap for larger allocations. This can be a useful optimization for improving cache -//! locality and reducing allocator traffic for workloads that fit within the inline buffer. -//! -//! ## `no_std` support -//! -//! By default, `smallvec` does not depend on `std`. However, the optional -//! `write` feature implements the `std::io::Write` trait for vectors of `u8`. -//! 
When this feature is enabled, `smallvec` depends on `std`. -//! -//! ## Optional features -//! -//! ### `write` -//! -//! When this feature is enabled, `SmallVec<[u8; _]>` implements the `std::io::Write` trait. -//! This feature is not compatible with `#![no_std]` programs. -//! -//! ### `union` -//! -//! **This feature is unstable and requires a nightly build of the Rust toolchain.** -//! -//! When the `union` feature is enabled `smallvec` will track its state (inline or spilled) -//! without the use of an enum tag, reducing the size of the `smallvec` by one machine word. -//! This means that there is potentially no space overhead compared to `Vec`. -//! Note that `smallvec` can still be larger than `Vec` if the inline buffer is larger than two -//! machine words. -//! -//! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml. -//! Note that this feature requires a nightly compiler (for now). -//! -//! ### `const_generics` -//! -//! **This feature is unstable and requires a nightly build of the Rust toolchain.** -//! -//! When this feature is enabled, `SmallVec` works with any arrays of any size, not just a fixed -//! list of sizes. -//! -//! ### `specialization` -//! -//! **This feature is unstable and requires a nightly build of the Rust toolchain.** -//! -//! When this feature is enabled, `SmallVec::from(slice)` has improved performance for slices -//! of `Copy` types. (Without this feature, you can use `SmallVec::from_slice` to get optimal -//! performance for `Copy` types.) -//! -//! ### `may_dangle` -//! -//! **This feature is unstable and requires a nightly build of the Rust toolchain.** -//! -//! This feature makes the Rust compiler less strict about use of vectors that contain borrowed -//! references. For details, see the -//! [Rustonomicon](https://doc.rust-lang.org/1.42.0/nomicon/dropck.html#an-escape-hatch). - -#![no_std] -#![cfg_attr(feature = "union", feature(untagged_unions))] -#![cfg_attr(feature = "specialization", feature(specialization))] -#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))] -#![cfg_attr(feature = "const_generics", allow(incomplete_features))] -#![cfg_attr(feature = "const_generics", feature(const_generics))] -#![deny(missing_docs)] - -#[doc(hidden)] -pub extern crate alloc; - -#[cfg(any(test, feature = "write"))] -extern crate std; - -use alloc::alloc::{Layout, LayoutErr}; -use alloc::boxed::Box; -use alloc::{vec, vec::Vec}; -use core::borrow::{Borrow, BorrowMut}; -use core::cmp; -use core::fmt; -use core::hash::{Hash, Hasher}; -use core::hint::unreachable_unchecked; -use core::iter::{repeat, FromIterator, FusedIterator, IntoIterator}; -use core::mem; -use core::mem::MaybeUninit; -use core::ops::{self, RangeBounds}; -use core::ptr::{self, NonNull}; -use core::slice::{self, SliceIndex}; - -#[cfg(feature = "serde")] -use serde::{ - de::{Deserialize, Deserializer, SeqAccess, Visitor}, - ser::{Serialize, SerializeSeq, Serializer}, -}; - -#[cfg(feature = "serde")] -use core::marker::PhantomData; - -#[cfg(feature = "write")] -use std::io; - -/// Creates a [`SmallVec`] containing the arguments. -/// -/// `smallvec!` allows `SmallVec`s to be defined with the same syntax as array expressions. 
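The removed crate docs above describe the optional `write` feature, under which `SmallVec<[u8; _]>` implements `std::io::Write`. A short sketch of what that enables, assuming a dependency on smallvec built with `features = ["write"]` (buffer size and payload are illustrative):

    use smallvec::SmallVec;
    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let mut buf: SmallVec<[u8; 8]> = SmallVec::new();
        buf.write_all(b"hi")?;            // two bytes fit in the inline buffer
        buf.write_all(b", more bytes")?;  // now 14 bytes, so the data spills to the heap
        assert!(buf.spilled());
        assert!(buf.starts_with(b"hi"));
        Ok(())
    }

The `smallvec!` macro introduced just above is documented next.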
-/// There are two forms of this macro: -/// -/// - Create a [`SmallVec`] containing a given list of elements: -/// -/// ``` -/// # #[macro_use] extern crate smallvec; -/// # use smallvec::SmallVec; -/// # fn main() { -/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3]; -/// assert_eq!(v[0], 1); -/// assert_eq!(v[1], 2); -/// assert_eq!(v[2], 3); -/// # } -/// ``` -/// -/// - Create a [`SmallVec`] from a given element and size: -/// -/// ``` -/// # #[macro_use] extern crate smallvec; -/// # use smallvec::SmallVec; -/// # fn main() { -/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3]; -/// assert_eq!(v, SmallVec::from_buf([1, 1, 1])); -/// # } -/// ``` -/// -/// Note that unlike array expressions this syntax supports all elements -/// which implement [`Clone`] and the number of elements doesn't have to be -/// a constant. -/// -/// This will use `clone` to duplicate an expression, so one should be careful -/// using this with types having a nonstandard `Clone` implementation. For -/// example, `smallvec![Rc::new(1); 5]` will create a vector of five references -/// to the same boxed integer value, not five references pointing to independently -/// boxed integers. - -#[macro_export] -macro_rules! smallvec { - // count helper: transform any expression into 1 - (@one $x:expr) => (1usize); - ($elem:expr; $n:expr) => ({ - $crate::SmallVec::from_elem($elem, $n) - }); - ($($x:expr),*$(,)*) => ({ - let count = 0usize $(+ smallvec!(@one $x))*; - let mut vec = $crate::SmallVec::new(); - if count <= vec.inline_size() { - $(vec.push($x);)* - vec - } else { - $crate::SmallVec::from_vec($crate::alloc::vec![$($x,)*]) - } - }); -} - -/// `panic!()` in debug builds, optimization hint in release. -#[cfg(not(feature = "union"))] -macro_rules! debug_unreachable { - () => { - debug_unreachable!("entered unreachable code") - }; - ($e:expr) => { - if cfg!(not(debug_assertions)) { - unreachable_unchecked(); - } else { - panic!($e); - } - }; -} - -/// Trait to be implemented by a collection that can be extended from a slice -/// -/// ## Example -/// -/// ```rust -/// use smallvec::{ExtendFromSlice, SmallVec}; -/// -/// fn initialize>(v: &mut V) { -/// v.extend_from_slice(b"Test!"); -/// } -/// -/// let mut vec = Vec::new(); -/// initialize(&mut vec); -/// assert_eq!(&vec, b"Test!"); -/// -/// let mut small_vec = SmallVec::<[u8; 8]>::new(); -/// initialize(&mut small_vec); -/// assert_eq!(&small_vec as &[_], b"Test!"); -/// ``` -pub trait ExtendFromSlice { - /// Extends a collection from a slice of its element type - fn extend_from_slice(&mut self, other: &[T]); -} - -impl ExtendFromSlice for Vec { - fn extend_from_slice(&mut self, other: &[T]) { - Vec::extend_from_slice(self, other) - } -} - -/// Error type for APIs with fallible heap allocation -#[derive(Debug)] -pub enum CollectionAllocErr { - /// Overflow `usize::MAX` or other error during size computation - CapacityOverflow, - /// The allocator return an error - AllocErr { - /// The layout that was passed to the allocator - layout: Layout, - }, -} - -impl From for CollectionAllocErr { - fn from(_: LayoutErr) -> Self { - CollectionAllocErr::CapacityOverflow - } -} - -fn infallible(result: Result) -> T { - match result { - Ok(x) => x, - Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"), - Err(CollectionAllocErr::AllocErr { layout }) => alloc::alloc::handle_alloc_error(layout), - } -} - -/// FIXME: use `Layout::array` when we require a Rust version where it’s stable -/// https://github.com/rust-lang/rust/issues/55724 -fn layout_array(n: 
usize) -> Result { - let size = mem::size_of::().checked_mul(n) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - let align = mem::align_of::(); - Layout::from_size_align(size, align) - .map_err(|_| CollectionAllocErr::CapacityOverflow) -} - -unsafe fn deallocate(ptr: *mut T, capacity: usize) { - // This unwrap should succeed since the same did when allocating. - let layout = layout_array::(capacity).unwrap(); - alloc::alloc::dealloc(ptr as *mut u8, layout) -} - -/// An iterator that removes the items from a `SmallVec` and yields them by value. -/// -/// Returned from [`SmallVec::drain`][1]. -/// -/// [1]: struct.SmallVec.html#method.drain -pub struct Drain<'a, T: 'a + Array> { - tail_start: usize, - tail_len: usize, - iter: slice::Iter<'a, T::Item>, - vec: NonNull>, -} - -impl<'a, T: 'a + Array> fmt::Debug for Drain<'a, T> -where - T::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() - } -} - -unsafe impl<'a, T: Sync + Array> Sync for Drain<'a, T> {} -unsafe impl<'a, T: Send + Array> Send for Drain<'a, T> {} - -impl<'a, T: 'a + Array> Iterator for Drain<'a, T> { - type Item = T::Item; - - #[inline] - fn next(&mut self) -> Option { - self.iter - .next() - .map(|reference| unsafe { ptr::read(reference) }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, T: 'a + Array> DoubleEndedIterator for Drain<'a, T> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter - .next_back() - .map(|reference| unsafe { ptr::read(reference) }) - } -} - -impl<'a, T: Array> ExactSizeIterator for Drain<'a, T> { - #[inline] - fn len(&self) -> usize { - self.iter.len() - } -} - -impl<'a, T: Array> FusedIterator for Drain<'a, T> {} - -impl<'a, T: 'a + Array> Drop for Drain<'a, T> { - fn drop(&mut self) { - self.for_each(drop); - - if self.tail_len > 0 { - unsafe { - let source_vec = self.vec.as_mut(); - - // memmove back untouched tail, update to new length - let start = source_vec.len(); - let tail = self.tail_start; - if tail != start { - let src = source_vec.as_ptr().add(tail); - let dst = source_vec.as_mut_ptr().add(start); - ptr::copy(src, dst, self.tail_len); - } - source_vec.set_len(start + self.tail_len); - } - } - } -} - -#[cfg(feature = "union")] -union SmallVecData { - inline: MaybeUninit, - heap: (*mut A::Item, usize), -} - -#[cfg(feature = "union")] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> *const A::Item { - self.inline.as_ptr() as *const A::Item - } - #[inline] - unsafe fn inline_mut(&mut self) -> *mut A::Item { - self.inline.as_mut_ptr() as *mut A::Item - } - #[inline] - fn from_inline(inline: MaybeUninit) -> SmallVecData { - SmallVecData { inline } - } - #[inline] - unsafe fn into_inline(self) -> MaybeUninit { - self.inline - } - #[inline] - unsafe fn heap(&self) -> (*mut A::Item, usize) { - self.heap - } - #[inline] - unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { - &mut self.heap - } - #[inline] - fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData { heap: (ptr, len) } - } -} - -#[cfg(not(feature = "union"))] -enum SmallVecData { - Inline(MaybeUninit), - Heap((*mut A::Item, usize)), -} - -#[cfg(not(feature = "union"))] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> *const A::Item { - match self { - SmallVecData::Inline(a) => a.as_ptr() as *const A::Item, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn inline_mut(&mut self) -> *mut A::Item { - match self 
{ - SmallVecData::Inline(a) => a.as_mut_ptr() as *mut A::Item, - _ => debug_unreachable!(), - } - } - #[inline] - fn from_inline(inline: MaybeUninit) -> SmallVecData { - SmallVecData::Inline(inline) - } - #[inline] - unsafe fn into_inline(self) -> MaybeUninit { - match self { - SmallVecData::Inline(a) => a, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap(&self) -> (*mut A::Item, usize) { - match self { - SmallVecData::Heap(data) => *data, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { - match self { - SmallVecData::Heap(data) => data, - _ => debug_unreachable!(), - } - } - #[inline] - fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData::Heap((ptr, len)) - } -} - -unsafe impl Send for SmallVecData {} -unsafe impl Sync for SmallVecData {} - -/// A `Vec`-like container that can store a small number of elements inline. -/// -/// `SmallVec` acts like a vector, but can store a limited amount of data inline within the -/// `SmallVec` struct rather than in a separate allocation. If the data exceeds this limit, the -/// `SmallVec` will "spill" its data onto the heap, allocating a new buffer to hold it. -/// -/// The amount of data that a `SmallVec` can store inline depends on its backing store. The backing -/// store can be any type that implements the `Array` trait; usually it is a small fixed-sized -/// array. For example a `SmallVec<[u64; 8]>` can hold up to eight 64-bit integers inline. -/// -/// ## Example -/// -/// ```rust -/// use smallvec::SmallVec; -/// let mut v = SmallVec::<[u8; 4]>::new(); // initialize an empty vector -/// -/// // The vector can hold up to 4 items without spilling onto the heap. -/// v.extend(0..4); -/// assert_eq!(v.len(), 4); -/// assert!(!v.spilled()); -/// -/// // Pushing another element will force the buffer to spill: -/// v.push(4); -/// assert_eq!(v.len(), 5); -/// assert!(v.spilled()); -/// ``` -pub struct SmallVec { - // The capacity field is used to determine which of the storage variants is active: - // If capacity <= A::size() then the inline variant is used and capacity holds the current length of the vector (number of elements actually in use). - // If capacity > A::size() then the heap variant is used and capacity holds the size of the memory allocation. - capacity: usize, - data: SmallVecData, -} - -impl SmallVec { - /// Construct an empty vector - #[inline] - pub fn new() -> SmallVec { - // Try to detect invalid custom implementations of `Array`. Hopefuly, - // this check should be optimized away entirely for valid ones. - assert!( - mem::size_of::() == A::size() * mem::size_of::() - && mem::align_of::() >= mem::align_of::() - ); - SmallVec { - capacity: 0, - data: SmallVecData::from_inline(MaybeUninit::uninit()), - } - } - - /// Construct an empty vector with enough capacity pre-allocated to store at least `n` - /// elements. - /// - /// Will create a heap allocation only if `n` is larger than the inline capacity. - /// - /// ``` - /// # use smallvec::SmallVec; - /// - /// let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(100); - /// - /// assert!(v.is_empty()); - /// assert!(v.capacity() >= 100); - /// ``` - #[inline] - pub fn with_capacity(n: usize) -> Self { - let mut v = SmallVec::new(); - v.reserve_exact(n); - v - } - - /// Construct a new `SmallVec` from a `Vec`. - /// - /// Elements will be copied to the inline buffer if vec.capacity() <= A::size(). 
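`SmallVec::from_vec`, described just above, either copies the elements into the inline buffer or takes over the `Vec`'s heap allocation outright, depending on the vector's capacity; the removed doc's own example follows below. A sketch contrasting the two outcomes (element type and sizes are arbitrary):

    use smallvec::SmallVec;

    fn main() {
        // capacity 3 <= inline size 4: the elements are copied inline
        let small: SmallVec<[i32; 4]> = SmallVec::from_vec(vec![1, 2, 3]);
        assert!(!small.spilled());

        // capacity 6 > inline size 4: the Vec's heap buffer is reused without copying
        let big: SmallVec<[i32; 4]> = SmallVec::from_vec(vec![1, 2, 3, 4, 5, 6]);
        assert!(big.spilled());
        assert_eq!(&*big, &[1, 2, 3, 4, 5, 6]);
    }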
- /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let vec = vec![1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<[_; 3]> = SmallVec::from_vec(vec); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_vec(mut vec: Vec) -> SmallVec { - if vec.capacity() <= A::size() { - unsafe { - let mut data = SmallVecData::::from_inline(MaybeUninit::uninit()); - let len = vec.len(); - vec.set_len(0); - ptr::copy_nonoverlapping(vec.as_ptr(), data.inline_mut(), len); - - SmallVec { - capacity: len, - data, - } - } - } else { - let (ptr, cap, len) = (vec.as_mut_ptr(), vec.capacity(), vec.len()); - mem::forget(vec); - - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf(buf); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf(buf: A) -> SmallVec { - SmallVec { - capacity: A::size(), - data: SmallVecData::from_inline(MaybeUninit::new(buf)), - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length, which must be less or - /// equal to the size of `buf`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf_and_len(buf, 5); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf_and_len(buf: A, len: usize) -> SmallVec { - assert!(len <= A::size()); - unsafe { SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), len) } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length. The user is responsible - /// for ensuring that `len <= A::size()`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// use std::mem::MaybeUninit; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = unsafe { - /// SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), 5) - /// }; - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub unsafe fn from_buf_and_len_unchecked(buf: MaybeUninit, len: usize) -> SmallVec { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(buf), - } - } - - /// Sets the length of a vector. - /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. - pub unsafe fn set_len(&mut self, new_len: usize) { - let (_, len_ptr, _) = self.triple_mut(); - *len_ptr = new_len; - } - - /// The maximum number of elements this vector can hold inline - #[inline] - pub fn inline_size(&self) -> usize { - A::size() - } - - /// The number of elements stored in the vector - #[inline] - pub fn len(&self) -> usize { - self.triple().1 - } - - /// Returns `true` if the vector is empty - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The number of items the vector can hold without reallocating - #[inline] - pub fn capacity(&self) -> usize { - self.triple().2 - } - - /// Returns a tuple with (data ptr, len, capacity) - /// Useful to get all SmallVec properties with a single check of the current storage variant. 
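The accessors documented above (`inline_size`, `len`, `is_empty`, `capacity`) all depend on which storage variant is active, which is why `triple` below bundles data pointer, length, and capacity behind a single check. A small sketch of how the reported numbers change when a vector crosses its inline capacity (concrete sizes are illustrative):

    use smallvec::SmallVec;

    fn main() {
        let mut v: SmallVec<[u32; 4]> = SmallVec::new();
        assert_eq!(v.inline_size(), 4); // fixed by the backing array type
        assert_eq!(v.capacity(), 4);    // inline storage counts as capacity
        assert!(v.is_empty());

        v.extend(0..5);                 // one element more than fits inline
        assert!(v.spilled());
        assert_eq!(v.len(), 5);
        assert!(v.capacity() >= 5);     // now reports the heap allocation's size
    }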
- #[inline] - fn triple(&self) -> (*const A::Item, usize, usize) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - (ptr, len, self.capacity) - } else { - (self.data.inline(), self.capacity, A::size()) - } - } - } - - /// Returns a tuple with (data ptr, len ptr, capacity) - #[inline] - fn triple_mut(&mut self) -> (*mut A::Item, &mut usize, usize) { - unsafe { - if self.spilled() { - let &mut (ptr, ref mut len_ptr) = self.data.heap_mut(); - (ptr, len_ptr, self.capacity) - } else { - (self.data.inline_mut(), &mut self.capacity, A::size()) - } - } - } - - /// Returns `true` if the data has spilled into a separate heap-allocated buffer. - #[inline] - pub fn spilled(&self) -> bool { - self.capacity > A::size() - } - - /// Creates a draining iterator that removes the specified range in the vector - /// and yields the removed items. - /// - /// Note 1: The element range is removed even if the iterator is only - /// partially consumed or not consumed at all. - /// - /// Note 2: It is unspecified how many elements are removed from the vector - /// if the `Drain` value is leaked. - /// - /// # Panics - /// - /// Panics if the starting point is greater than the end point or if - /// the end point is greater than the length of the vector. - pub fn drain(&mut self, range: R) -> Drain<'_, A> - where - R: RangeBounds, - { - use core::ops::Bound::*; - - let len = self.len(); - let start = match range.start_bound() { - Included(&n) => n, - Excluded(&n) => n + 1, - Unbounded => 0, - }; - let end = match range.end_bound() { - Included(&n) => n + 1, - Excluded(&n) => n, - Unbounded => len, - }; - - assert!(start <= end); - assert!(end <= len); - - unsafe { - self.set_len(start); - - let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start); - - Drain { - tail_start: end, - tail_len: len - end, - iter: range_slice.iter(), - vec: NonNull::from(self), - } - } - } - - /// Append an item to the vector. - #[inline] - pub fn push(&mut self, value: A::Item) { - unsafe { - let (_, &mut len, cap) = self.triple_mut(); - if len == cap { - self.reserve(1); - } - let (ptr, len_ptr, _) = self.triple_mut(); - *len_ptr = len + 1; - ptr::write(ptr.add(len), value); - } - } - - /// Remove an item from the end of the vector and return it, or None if empty. - #[inline] - pub fn pop(&mut self) -> Option { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - if *len_ptr == 0 { - return None; - } - let last_index = *len_ptr - 1; - *len_ptr = last_index; - Some(ptr::read(ptr.add(last_index))) - } - } - - /// Re-allocate to set the capacity to `max(new_cap, inline_size())`. - /// - /// Panics if `new_cap` is less than the vector's length - /// or if the capacity computation overflows `usize`. - pub fn grow(&mut self, new_cap: usize) { - infallible(self.try_grow(new_cap)) - } - - /// Re-allocate to set the capacity to `max(new_cap, inline_size())`. 
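`drain`, documented above, removes the selected range eagerly (the elements are gone even if the returned iterator is dropped early) and yields the removed items by value. A brief sketch using the RangeBounds-based signature shown in this removed source (values are illustrative):

    use smallvec::SmallVec;

    fn main() {
        let mut v: SmallVec<[u8; 8]> = (0u8..6).collect();

        // pull out the middle two elements; the tail shifts back afterwards
        let middle: Vec<u8> = v.drain(2..4).collect();

        assert_eq!(middle, vec![2u8, 3]);
        assert_eq!(&*v, &[0u8, 1, 4, 5]);
    }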
- /// - /// Panics if `new_cap` is less than the vector's length - pub fn try_grow(&mut self, new_cap: usize) -> Result<(), CollectionAllocErr> { - unsafe { - let (ptr, &mut len, cap) = self.triple_mut(); - let unspilled = !self.spilled(); - assert!(new_cap >= len); - if new_cap <= self.inline_size() { - if unspilled { - return Ok(()); - } - self.data = SmallVecData::from_inline(MaybeUninit::uninit()); - ptr::copy_nonoverlapping(ptr, self.data.inline_mut(), len); - self.capacity = len; - deallocate(ptr, cap); - } else if new_cap != cap { - let layout = layout_array::(new_cap)?; - let new_alloc; - if unspilled { - new_alloc = NonNull::new(alloc::alloc::alloc(layout)) - .ok_or(CollectionAllocErr::AllocErr { layout })? - .cast() - .as_ptr(); - ptr::copy_nonoverlapping(ptr, new_alloc, len); - } else { - // This should never fail since the same succeeded - // when previously allocating `ptr`. - let old_layout = layout_array::(cap)?; - - let new_ptr = alloc::alloc::realloc(ptr as *mut u8, old_layout, layout.size()); - new_alloc = NonNull::new(new_ptr) - .ok_or(CollectionAllocErr::AllocErr { layout })? - .cast() - .as_ptr(); - } - self.data = SmallVecData::from_heap(new_alloc, len); - self.capacity = new_cap; - } - Ok(()) - } - } - - /// Reserve capacity for `additional` more elements to be inserted. - /// - /// May reserve more space to avoid frequent reallocations. - /// - /// Panics if the capacity computation overflows `usize`. - #[inline] - pub fn reserve(&mut self, additional: usize) { - infallible(self.try_reserve(additional)) - } - - /// Reserve capacity for `additional` more elements to be inserted. - /// - /// May reserve more space to avoid frequent reallocations. - pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { - // prefer triple_mut() even if triple() would work - // so that the optimizer removes duplicated calls to it - // from callers like insert() - let (_, &mut len, cap) = self.triple_mut(); - if cap - len >= additional { - return Ok(()); - } - let new_cap = len - .checked_add(additional) - .and_then(usize::checked_next_power_of_two) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - self.try_grow(new_cap) - } - - /// Reserve the minimum capacity for `additional` more elements to be inserted. - /// - /// Panics if the new capacity overflows `usize`. - pub fn reserve_exact(&mut self, additional: usize) { - infallible(self.try_reserve_exact(additional)) - } - - /// Reserve the minimum capacity for `additional` more elements to be inserted. - pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { - let (_, &mut len, cap) = self.triple_mut(); - if cap - len >= additional { - return Ok(()); - } - let new_cap = len - .checked_add(additional) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - self.try_grow(new_cap) - } - - /// Shrink the capacity of the vector as much as possible. - /// - /// When possible, this will move data from an external heap buffer to the vector's inline - /// storage. - pub fn shrink_to_fit(&mut self) { - if !self.spilled() { - return; - } - let len = self.len(); - if self.inline_size() >= len { - unsafe { - let (ptr, len) = self.data.heap(); - self.data = SmallVecData::from_inline(MaybeUninit::uninit()); - ptr::copy_nonoverlapping(ptr, self.data.inline_mut(), len); - deallocate(ptr, self.capacity); - self.capacity = len; - } - } else if self.capacity() > len { - self.grow(len); - } - } - - /// Shorten the vector, keeping the first `len` elements and dropping the rest. 
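The `truncate` documentation beginning just above pairs with `shrink_to_fit` from earlier in this impl: truncating never re-allocates, so spilled capacity is only handed back when shrinking is requested explicitly. A sketch of that round trip, assuming the API as documented in this removed file:

    use smallvec::SmallVec;

    fn main() {
        let mut v: SmallVec<[u8; 4]> = SmallVec::from_vec((0..100).collect());
        assert!(v.spilled());          // 100 elements cannot fit in the inline buffer

        v.truncate(3);                 // drops elements 3..100 but keeps the heap buffer
        assert!(v.spilled());

        v.shrink_to_fit();             // three elements fit inline again
        assert!(!v.spilled());
        assert_eq!(&*v, &[0u8, 1, 2]);
    }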
- /// - /// If `len` is greater than or equal to the vector's current length, this has no - /// effect. - /// - /// This does not re-allocate. If you want the vector's capacity to shrink, call - /// `shrink_to_fit` after truncating. - pub fn truncate(&mut self, len: usize) { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - while len < *len_ptr { - let last_index = *len_ptr - 1; - *len_ptr = last_index; - ptr::drop_in_place(ptr.add(last_index)); - } - } - } - - /// Extracts a slice containing the entire vector. - /// - /// Equivalent to `&s[..]`. - pub fn as_slice(&self) -> &[A::Item] { - self - } - - /// Extracts a mutable slice of the entire vector. - /// - /// Equivalent to `&mut s[..]`. - pub fn as_mut_slice(&mut self) -> &mut [A::Item] { - self - } - - /// Remove the element at position `index`, replacing it with the last element. - /// - /// This does not preserve ordering, but is O(1). - /// - /// Panics if `index` is out of bounds. - #[inline] - pub fn swap_remove(&mut self, index: usize) -> A::Item { - let len = self.len(); - self.swap(len - 1, index); - self.pop() - .unwrap_or_else(|| unsafe { unreachable_unchecked() }) - } - - /// Remove all elements from the vector. - #[inline] - pub fn clear(&mut self) { - self.truncate(0); - } - - /// Remove and return the element at position `index`, shifting all elements after it to the - /// left. - /// - /// Panics if `index` is out of bounds. - pub fn remove(&mut self, index: usize) -> A::Item { - unsafe { - let (mut ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index < len); - *len_ptr = len - 1; - ptr = ptr.add(index); - let item = ptr::read(ptr); - ptr::copy(ptr.add(1), ptr, len - index - 1); - item - } - } - - /// Insert an element at position `index`, shifting all elements after it to the right. - /// - /// Panics if `index` is out of bounds. - pub fn insert(&mut self, index: usize, element: A::Item) { - self.reserve(1); - - unsafe { - let (mut ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index <= len); - *len_ptr = len + 1; - ptr = ptr.add(index); - ptr::copy(ptr, ptr.add(1), len - index); - ptr::write(ptr, element); - } - } - - /// Insert multiple elements at position `index`, shifting all following elements toward the - /// back. - /// - /// Note: when the iterator panics, this can leak memory. - pub fn insert_many>(&mut self, index: usize, iterable: I) { - let iter = iterable.into_iter(); - if index == self.len() { - return self.extend(iter); - } - - let (lower_size_bound, _) = iter.size_hint(); - assert!(lower_size_bound <= core::isize::MAX as usize); // Ensure offset is indexable - assert!(index + lower_size_bound >= index); // Protect against overflow - self.reserve(lower_size_bound); - - unsafe { - let old_len = self.len(); - assert!(index <= old_len); - let mut ptr = self.as_mut_ptr().add(index); - - // Move the trailing elements. - ptr::copy(ptr, ptr.add(lower_size_bound), old_len - index); - - // In case the iterator panics, don't double-drop the items we just copied above. - self.set_len(index); - - let mut num_added = 0; - for element in iter { - let mut cur = ptr.add(num_added); - if num_added >= lower_size_bound { - // Iterator provided more elements than the hint. Move trailing items again. 
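`remove`, `swap_remove`, and `insert` above trade cost against order preservation. A short sketch, same smallvec 1.x assumption as the previous examples:

```rust
use smallvec::{smallvec, SmallVec};

fn main() {
    let mut v: SmallVec<[i32; 8]> = smallvec![10, 20, 30, 40];

    // `remove` shifts the tail left, preserving order (O(n)).
    assert_eq!(v.remove(1), 20);
    assert_eq!(&*v, &[10, 30, 40]);

    // `swap_remove` fills the hole with the last element: O(1), order lost.
    assert_eq!(v.swap_remove(0), 10);
    assert_eq!(&*v, &[40, 30]);

    // `insert` shifts the tail right to make room at `index`.
    v.insert(1, 35);
    assert_eq!(&*v, &[40, 35, 30]);
}
```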
- self.reserve(1); - ptr = self.as_mut_ptr().add(index); - cur = ptr.add(num_added); - ptr::copy(cur, cur.add(1), old_len - index); - } - ptr::write(cur, element); - num_added += 1; - } - if num_added < lower_size_bound { - // Iterator provided fewer elements than the hint - ptr::copy( - ptr.add(lower_size_bound), - ptr.add(num_added), - old_len - index, - ); - } - - self.set_len(old_len + num_added); - } - } - - /// Convert a SmallVec to a Vec, without reallocating if the SmallVec has already spilled onto - /// the heap. - pub fn into_vec(self) -> Vec { - if self.spilled() { - unsafe { - let (ptr, len) = self.data.heap(); - let v = Vec::from_raw_parts(ptr, len, self.capacity); - mem::forget(self); - v - } - } else { - self.into_iter().collect() - } - } - - /// Converts a `SmallVec` into a `Box<[T]>` without reallocating if the `SmallVec` has already spilled - /// onto the heap. - /// - /// Note that this will drop any excess capacity. - pub fn into_boxed_slice(self) -> Box<[A::Item]> { - self.into_vec().into_boxed_slice() - } - - /// Convert the SmallVec into an `A` if possible. Otherwise return `Err(Self)`. - /// - /// This method returns `Err(Self)` if the SmallVec is too short (and the `A` contains uninitialized elements), - /// or if the SmallVec is too long (and all the elements were spilled to the heap). - pub fn into_inner(self) -> Result { - if self.spilled() || self.len() != A::size() { - Err(self) - } else { - unsafe { - let data = ptr::read(&self.data); - mem::forget(self); - Ok(data.into_inline().assume_init()) - } - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(&e)` returns `false`. - /// This method operates in place and preserves the order of the retained - /// elements. - pub fn retain bool>(&mut self, mut f: F) { - let mut del = 0; - let len = self.len(); - for i in 0..len { - if !f(&mut self[i]) { - del += 1; - } else if del > 0 { - self.swap(i - del, i); - } - } - self.truncate(len - del); - } - - /// Removes consecutive duplicate elements. - pub fn dedup(&mut self) - where - A::Item: PartialEq, - { - self.dedup_by(|a, b| a == b); - } - - /// Removes consecutive duplicate elements using the given equality relation. - pub fn dedup_by(&mut self, mut same_bucket: F) - where - F: FnMut(&mut A::Item, &mut A::Item) -> bool, - { - // See the implementation of Vec::dedup_by in the - // standard library for an explanation of this algorithm. - let len = self.len(); - if len <= 1 { - return; - } - - let ptr = self.as_mut_ptr(); - let mut w: usize = 1; - - unsafe { - for r in 1..len { - let p_r = ptr.add(r); - let p_wm1 = ptr.add(w - 1); - if !same_bucket(&mut *p_r, &mut *p_wm1) { - if r != w { - let p_w = p_wm1.add(1); - mem::swap(&mut *p_r, &mut *p_w); - } - w += 1; - } - } - } - - self.truncate(w); - } - - /// Removes consecutive elements that map to the same key. - pub fn dedup_by_key(&mut self, mut key: F) - where - F: FnMut(&mut A::Item) -> K, - K: PartialEq, - { - self.dedup_by(|a, b| key(a) == key(b)); - } - - /// Creates a `SmallVec` directly from the raw components of another - /// `SmallVec`. - /// - /// # Safety - /// - /// This is highly unsafe, due to the number of invariants that aren't - /// checked: - /// - /// * `ptr` needs to have been previously allocated via `SmallVec` for its - /// spilled storage (at least, it's highly likely to be incorrect if it - /// wasn't). 
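`into_inner` above only succeeds when the vector exactly fills its inline array, `dedup` collapses only consecutive duplicates, and `retain` filters in place. A sketch under the same smallvec 1.x assumption:

```rust
use smallvec::{smallvec, SmallVec};

fn main() {
    // `into_inner` returns the backing array only if the length equals the
    // inline capacity and nothing has spilled to the heap.
    let full: SmallVec<[u8; 2]> = smallvec![1, 2];
    assert_eq!(full.into_inner(), Ok([1, 2]));
    let short: SmallVec<[u8; 2]> = smallvec![1];
    assert!(short.into_inner().is_err());

    // `dedup` removes consecutive duplicates only; sort first for a global dedup.
    let mut v: SmallVec<[i32; 8]> = smallvec![1, 1, 2, 2, 1];
    v.dedup();
    assert_eq!(&*v, &[1, 2, 1]);

    // `retain` keeps elements matching the predicate, preserving order.
    v.retain(|x| *x != 2);
    assert_eq!(&*v, &[1, 1]);
}
```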
- /// * `ptr`'s `A::Item` type needs to be the same size and alignment that - /// it was allocated with - /// * `length` needs to be less than or equal to `capacity`. - /// * `capacity` needs to be the capacity that the pointer was allocated - /// with. - /// - /// Violating these may cause problems like corrupting the allocator's - /// internal data structures. - /// - /// Additionally, `capacity` must be greater than the amount of inline - /// storage `A` has; that is, the new `SmallVec` must need to spill over - /// into heap allocated storage. This condition is asserted against. - /// - /// The ownership of `ptr` is effectively transferred to the - /// `SmallVec` which may then deallocate, reallocate or change the - /// contents of memory pointed to by the pointer at will. Ensure - /// that nothing else uses the pointer after calling this - /// function. - /// - /// # Examples - /// - /// ``` - /// # #[macro_use] extern crate smallvec; - /// # use smallvec::SmallVec; - /// use std::mem; - /// use std::ptr; - /// - /// fn main() { - /// let mut v: SmallVec<[_; 1]> = smallvec![1, 2, 3]; - /// - /// // Pull out the important parts of `v`. - /// let p = v.as_mut_ptr(); - /// let len = v.len(); - /// let cap = v.capacity(); - /// let spilled = v.spilled(); - /// - /// unsafe { - /// // Forget all about `v`. The heap allocation that stored the - /// // three values won't be deallocated. - /// mem::forget(v); - /// - /// // Overwrite memory with [4, 5, 6]. - /// // - /// // This is only safe if `spilled` is true! Otherwise, we are - /// // writing into the old `SmallVec`'s inline storage on the - /// // stack. - /// assert!(spilled); - /// for i in 0..len { - /// ptr::write(p.add(i), 4 + i); - /// } - /// - /// // Put everything back together into a SmallVec with a different - /// // amount of inline storage, but which is still less than `cap`. - /// let rebuilt = SmallVec::<[_; 2]>::from_raw_parts(p, len, cap); - /// assert_eq!(&*rebuilt, &[4, 5, 6]); - /// } - /// } - #[inline] - pub unsafe fn from_raw_parts(ptr: *mut A::Item, length: usize, capacity: usize) -> SmallVec { - assert!(capacity > A::size()); - SmallVec { - capacity, - data: SmallVecData::from_heap(ptr, length), - } - } -} - -impl SmallVec -where - A::Item: Copy, -{ - /// Copy the elements from a slice into a new `SmallVec`. - /// - /// For slices of `Copy` types, this is more efficient than `SmallVec::from(slice)`. - pub fn from_slice(slice: &[A::Item]) -> Self { - let len = slice.len(); - if len <= A::size() { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(unsafe { - let mut data: MaybeUninit = MaybeUninit::uninit(); - ptr::copy_nonoverlapping( - slice.as_ptr(), - data.as_mut_ptr() as *mut A::Item, - len, - ); - data - }), - } - } else { - let mut b = slice.to_vec(); - let (ptr, cap) = (b.as_mut_ptr(), b.capacity()); - mem::forget(b); - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Copy elements from a slice into the vector at position `index`, shifting any following - /// elements toward the back. - /// - /// For slices of `Copy` types, this is more efficient than `insert`. 
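`from_slice` above is the copy fast path for `Copy` element types, choosing inline or heap storage based on the slice length. A sketch, same smallvec 1.x assumption:

```rust
use smallvec::SmallVec;

fn main() {
    let bytes = [1u8, 2, 3];

    // For `Copy` element types, `from_slice` copies directly into the inline
    // buffer (or a single heap allocation) instead of iterating.
    let v: SmallVec<[u8; 4]> = SmallVec::from_slice(&bytes);
    assert!(!v.spilled());
    assert_eq!(&*v, &bytes[..]);

    // A slice longer than the inline capacity goes straight to the heap.
    let big: SmallVec<[u8; 2]> = SmallVec::from_slice(&bytes);
    assert!(big.spilled());
}
```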
- pub fn insert_from_slice(&mut self, index: usize, slice: &[A::Item]) { - self.reserve(slice.len()); - - let len = self.len(); - assert!(index <= len); - - unsafe { - let slice_ptr = slice.as_ptr(); - let ptr = self.as_mut_ptr().add(index); - ptr::copy(ptr, ptr.add(slice.len()), len - index); - ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len()); - self.set_len(len + slice.len()); - } - } - - /// Copy elements from a slice and append them to the vector. - /// - /// For slices of `Copy` types, this is more efficient than `extend`. - #[inline] - pub fn extend_from_slice(&mut self, slice: &[A::Item]) { - let len = self.len(); - self.insert_from_slice(len, slice); - } -} - -impl SmallVec -where - A::Item: Clone, -{ - /// Resizes the vector so that its length is equal to `len`. - /// - /// If `len` is less than the current length, the vector simply truncated. - /// - /// If `len` is greater than the current length, `value` is appended to the - /// vector until its length equals `len`. - pub fn resize(&mut self, len: usize, value: A::Item) { - let old_len = self.len(); - - if len > old_len { - self.extend(repeat(value).take(len - old_len)); - } else { - self.truncate(len); - } - } - - /// Creates a `SmallVec` with `n` copies of `elem`. - /// ``` - /// use smallvec::SmallVec; - /// - /// let v = SmallVec::<[char; 128]>::from_elem('d', 2); - /// assert_eq!(v, SmallVec::from_buf(['d', 'd'])); - /// ``` - pub fn from_elem(elem: A::Item, n: usize) -> Self { - if n > A::size() { - vec![elem; n].into() - } else { - let mut v = SmallVec::::new(); - unsafe { - let (ptr, len_ptr, _) = v.triple_mut(); - let mut local_len = SetLenOnDrop::new(len_ptr); - - for i in 0..n { - ::core::ptr::write(ptr.add(i), elem.clone()); - local_len.increment_len(1); - } - } - v - } - } -} - -impl ops::Deref for SmallVec { - type Target = [A::Item]; - #[inline] - fn deref(&self) -> &[A::Item] { - unsafe { - let (ptr, len, _) = self.triple(); - slice::from_raw_parts(ptr, len) - } - } -} - -impl ops::DerefMut for SmallVec { - #[inline] - fn deref_mut(&mut self) -> &mut [A::Item] { - unsafe { - let (ptr, &mut len, _) = self.triple_mut(); - slice::from_raw_parts_mut(ptr, len) - } - } -} - -impl AsRef<[A::Item]> for SmallVec { - #[inline] - fn as_ref(&self) -> &[A::Item] { - self - } -} - -impl AsMut<[A::Item]> for SmallVec { - #[inline] - fn as_mut(&mut self) -> &mut [A::Item] { - self - } -} - -impl Borrow<[A::Item]> for SmallVec { - #[inline] - fn borrow(&self) -> &[A::Item] { - self - } -} - -impl BorrowMut<[A::Item]> for SmallVec { - #[inline] - fn borrow_mut(&mut self) -> &mut [A::Item] { - self - } -} - -#[cfg(feature = "write")] -impl> io::Write for SmallVec { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - self.extend_from_slice(buf); - Ok(buf.len()) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - self.extend_from_slice(buf); - Ok(()) - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for SmallVec -where - A::Item: Serialize, -{ - fn serialize(&self, serializer: S) -> Result { - let mut state = serializer.serialize_seq(Some(self.len()))?; - for item in self { - state.serialize_element(&item)?; - } - state.end() - } -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Deserialize<'de> for SmallVec -where - A::Item: Deserialize<'de>, -{ - fn deserialize>(deserializer: D) -> Result { - deserializer.deserialize_seq(SmallVecVisitor { - phantom: PhantomData, - }) - } -} - -#[cfg(feature = "serde")] -struct 
SmallVecVisitor { - phantom: PhantomData, -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Visitor<'de> for SmallVecVisitor -where - A::Item: Deserialize<'de>, -{ - type Value = SmallVec; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a sequence") - } - - fn visit_seq(self, mut seq: B) -> Result - where - B: SeqAccess<'de>, - { - let len = seq.size_hint().unwrap_or(0); - let mut values = SmallVec::with_capacity(len); - - while let Some(value) = seq.next_element()? { - values.push(value); - } - - Ok(values) - } -} - -#[cfg(feature = "specialization")] -trait SpecFrom { - fn spec_from(slice: S) -> SmallVec; -} - -#[cfg(feature = "specialization")] -mod specialization; - -#[cfg(feature = "specialization")] -impl<'a, A: Array> SpecFrom for SmallVec -where - A::Item: Copy, -{ - #[inline] - fn spec_from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::from_slice(slice) - } -} - -impl<'a, A: Array> From<&'a [A::Item]> for SmallVec -where - A::Item: Clone, -{ - #[cfg(not(feature = "specialization"))] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - slice.iter().cloned().collect() - } - - #[cfg(feature = "specialization")] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::spec_from(slice) - } -} - -impl From> for SmallVec { - #[inline] - fn from(vec: Vec) -> SmallVec { - SmallVec::from_vec(vec) - } -} - -impl From for SmallVec { - #[inline] - fn from(array: A) -> SmallVec { - SmallVec::from_buf(array) - } -} - -impl> ops::Index for SmallVec { - type Output = I::Output; - - fn index(&self, index: I) -> &I::Output { - &(**self)[index] - } -} - -impl> ops::IndexMut for SmallVec { - fn index_mut(&mut self, index: I) -> &mut I::Output { - &mut (&mut **self)[index] - } -} - -impl ExtendFromSlice for SmallVec -where - A::Item: Copy, -{ - fn extend_from_slice(&mut self, other: &[A::Item]) { - SmallVec::extend_from_slice(self, other) - } -} - -impl FromIterator for SmallVec { - #[inline] - fn from_iter>(iterable: I) -> SmallVec { - let mut v = SmallVec::new(); - v.extend(iterable); - v - } -} - -impl Extend for SmallVec { - fn extend>(&mut self, iterable: I) { - let mut iter = iterable.into_iter(); - let (lower_size_bound, _) = iter.size_hint(); - self.reserve(lower_size_bound); - - unsafe { - let (ptr, len_ptr, cap) = self.triple_mut(); - let mut len = SetLenOnDrop::new(len_ptr); - while len.get() < cap { - if let Some(out) = iter.next() { - ptr::write(ptr.add(len.get()), out); - len.increment_len(1); - } else { - return; - } - } - } - - for elem in iter { - self.push(elem); - } - } -} - -impl fmt::Debug for SmallVec -where - A::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl Default for SmallVec { - #[inline] - fn default() -> SmallVec { - SmallVec::new() - } -} - -#[cfg(feature = "may_dangle")] -unsafe impl<#[may_dangle] A: Array> Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - Vec::from_raw_parts(ptr, len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -#[cfg(not(feature = "may_dangle"))] -impl Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - Vec::from_raw_parts(ptr, len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -impl Clone for SmallVec -where - A::Item: Clone, -{ - #[inline] - fn clone(&self) -> SmallVec { - let mut 
new_vector = SmallVec::with_capacity(self.len()); - for element in self.iter() { - new_vector.push((*element).clone()) - } - new_vector - } -} - -impl PartialEq> for SmallVec -where - A::Item: PartialEq, -{ - #[inline] - fn eq(&self, other: &SmallVec) -> bool { - self[..] == other[..] - } -} - -impl Eq for SmallVec where A::Item: Eq {} - -impl PartialOrd for SmallVec -where - A::Item: PartialOrd, -{ - #[inline] - fn partial_cmp(&self, other: &SmallVec) -> Option { - PartialOrd::partial_cmp(&**self, &**other) - } -} - -impl Ord for SmallVec -where - A::Item: Ord, -{ - #[inline] - fn cmp(&self, other: &SmallVec) -> cmp::Ordering { - Ord::cmp(&**self, &**other) - } -} - -impl Hash for SmallVec -where - A::Item: Hash, -{ - fn hash(&self, state: &mut H) { - (**self).hash(state) - } -} - -unsafe impl Send for SmallVec where A::Item: Send {} - -/// An iterator that consumes a `SmallVec` and yields its items by value. -/// -/// Returned from [`SmallVec::into_iter`][1]. -/// -/// [1]: struct.SmallVec.html#method.into_iter -pub struct IntoIter { - data: SmallVec, - current: usize, - end: usize, -} - -impl fmt::Debug for IntoIter -where - A::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("IntoIter").field(&self.as_slice()).finish() - } -} - -impl Clone for IntoIter -where - A::Item: Clone, -{ - fn clone(&self) -> IntoIter { - SmallVec::from(self.as_slice()).into_iter() - } -} - -impl Drop for IntoIter { - fn drop(&mut self) { - for _ in self {} - } -} - -impl Iterator for IntoIter { - type Item = A::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.current == self.end { - None - } else { - unsafe { - let current = self.current; - self.current += 1; - Some(ptr::read(self.data.as_ptr().add(current))) - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let size = self.end - self.current; - (size, Some(size)) - } -} - -impl DoubleEndedIterator for IntoIter { - #[inline] - fn next_back(&mut self) -> Option { - if self.current == self.end { - None - } else { - unsafe { - self.end -= 1; - Some(ptr::read(self.data.as_ptr().add(self.end))) - } - } - } -} - -impl ExactSizeIterator for IntoIter {} -impl FusedIterator for IntoIter {} - -impl IntoIter { - /// Returns the remaining items of this iterator as a slice. - pub fn as_slice(&self) -> &[A::Item] { - let len = self.end - self.current; - unsafe { core::slice::from_raw_parts(self.data.as_ptr().add(self.current), len) } - } - - /// Returns the remaining items of this iterator as a mutable slice. 
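The by-value `IntoIter` above is double-ended and exposes its unconsumed remainder through `as_slice`. A sketch, same smallvec 1.x assumption:

```rust
use smallvec::{smallvec, SmallVec};

fn main() {
    let v: SmallVec<[String; 2]> =
        smallvec!["a".to_string(), "b".to_string(), "c".to_string()];

    // `into_iter` yields owned items; `as_slice` shows what is left.
    let mut it = v.into_iter();
    assert_eq!(it.next().as_deref(), Some("a"));
    assert_eq!(it.as_slice(), ["b", "c"]);

    // The iterator is double-ended, so items can also be taken from the back.
    assert_eq!(it.next_back().as_deref(), Some("c"));
    assert_eq!(it.len(), 1);
}
```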
- pub fn as_mut_slice(&mut self) -> &mut [A::Item] { - let len = self.end - self.current; - unsafe { core::slice::from_raw_parts_mut(self.data.as_mut_ptr().add(self.current), len) } - } -} - -impl IntoIterator for SmallVec { - type IntoIter = IntoIter; - type Item = A::Item; - fn into_iter(mut self) -> Self::IntoIter { - unsafe { - // Set SmallVec len to zero as `IntoIter` drop handles dropping of the elements - let len = self.len(); - self.set_len(0); - IntoIter { - data: self, - current: 0, - end: len, - } - } - } -} - -impl<'a, A: Array> IntoIterator for &'a SmallVec { - type IntoIter = slice::Iter<'a, A::Item>; - type Item = &'a A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, A: Array> IntoIterator for &'a mut SmallVec { - type IntoIter = slice::IterMut<'a, A::Item>; - type Item = &'a mut A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -/// Types that can be used as the backing store for a SmallVec -pub unsafe trait Array { - /// The type of the array's elements. - type Item; - /// Returns the number of items the array can hold. - fn size() -> usize; -} - -/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. -/// -/// Copied from https://github.com/rust-lang/rust/pull/36355 -struct SetLenOnDrop<'a> { - len: &'a mut usize, - local_len: usize, -} - -impl<'a> SetLenOnDrop<'a> { - #[inline] - fn new(len: &'a mut usize) -> Self { - SetLenOnDrop { - local_len: *len, - len, - } - } - - #[inline] - fn get(&self) -> usize { - self.local_len - } - - #[inline] - fn increment_len(&mut self, increment: usize) { - self.local_len += increment; - } -} - -impl<'a> Drop for SetLenOnDrop<'a> { - #[inline] - fn drop(&mut self) { - *self.len = self.local_len; - } -} - -#[cfg(feature = "const_generics")] -unsafe impl Array for [T; N] { - type Item = T; - fn size() -> usize { N } -} - -#[cfg(not(feature = "const_generics"))] -macro_rules! impl_array( - ($($size:expr),+) => { - $( - unsafe impl Array for [T; $size] { - type Item = T; - fn size() -> usize { $size } - } - )+ - } -); - -#[cfg(not(feature = "const_generics"))] -impl_array!( - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x60, 0x80, - 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000, 0x2000, 0x4000, 0x6000, 0x8000, 0x10000, 0x20000, - 0x40000, 0x60000, 0x80000, 0x10_0000 -); - -/// Convenience trait for constructing a `SmallVec` -pub trait ToSmallVec { - /// Construct a new `SmallVec` from a slice. - fn to_smallvec(&self) -> SmallVec; -} - -impl ToSmallVec for [A::Item] - where A::Item: Copy { - #[inline] - fn to_smallvec(&self) -> SmallVec { - SmallVec::from_slice(self) - } -} - -#[cfg(test)] -mod tests { - use crate::SmallVec; - - use std::iter::FromIterator; - - use alloc::borrow::ToOwned; - use alloc::boxed::Box; - use alloc::rc::Rc; - use alloc::{vec, vec::Vec}; - - #[test] - pub fn test_zero() { - let mut v = SmallVec::<[_; 0]>::new(); - assert!(!v.spilled()); - v.push(0usize); - assert!(v.spilled()); - assert_eq!(&*v, &[0]); - } - - // We heap allocate all these strings so that double frees will show up under valgrind. 
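Alongside the `Array` sizes listed above, the removed source also ships a `ToSmallVec` convenience trait and the usual `FromIterator` plumbing. A sketch, same smallvec 1.x assumption:

```rust
use smallvec::{SmallVec, ToSmallVec};

fn main() {
    // Collect from any iterator, exactly like a Vec.
    let squares: SmallVec<[u64; 8]> = (1u64..=5).map(|n| n * n).collect();
    assert_eq!(&*squares, &[1, 4, 9, 16, 25]);
    assert!(!squares.spilled());

    // `ToSmallVec` builds a SmallVec straight from a slice of Copy items.
    let slice: &[u64] = &[1, 2, 3];
    let v: SmallVec<[u64; 8]> = slice.to_smallvec();
    assert_eq!(v.len(), 3);
}
```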
- - #[test] - pub fn test_inline() { - let mut v = SmallVec::<[_; 16]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - assert_eq!(&*v, &["hello".to_owned(), "there".to_owned(),][..]); - } - - #[test] - pub fn test_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - assert_eq!(v[0], "hello"); - v.push("there".to_owned()); - v.push("burma".to_owned()); - assert_eq!(v[0], "hello"); - v.push("shave".to_owned()); - assert_eq!( - &*v, - &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..] - ); - } - - #[test] - pub fn test_double_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - assert_eq!( - &*v, - &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..] - ); - } - - /// https://github.com/servo/rust-smallvec/issues/4 - #[test] - fn issue_4() { - SmallVec::<[Box; 2]>::new(); - } - - /// https://github.com/servo/rust-smallvec/issues/5 - #[test] - fn issue_5() { - assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some()); - } - - #[test] - fn test_with_capacity() { - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(1); - assert!(v.is_empty()); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 3); - - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(10); - assert!(v.is_empty()); - assert!(v.spilled()); - assert_eq!(v.capacity(), 10); - } - - #[test] - fn drain() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain(..).collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - let old_capacity = v.capacity(); - assert_eq!(v.drain(1..).collect::>(), &[4, 5]); - // drain should not change the capacity - assert_eq!(v.capacity(), old_capacity); - } - - #[test] - fn drain_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain(..).rev().collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.drain(..).rev().collect::>(), &[5, 4, 3]); - } - - #[test] - fn drain_forget() { - let mut v: SmallVec<[u8; 1]> = smallvec![0, 1, 2, 3, 4, 5, 6, 7]; - std::mem::forget(v.drain(2..5)); - assert_eq!(v.len(), 2); - } - - #[test] - fn into_iter() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().collect::>(), &[3, 4, 5]); - } - - #[test] - fn into_iter_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().rev().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().rev().collect::>(), &[5, 4, 3]); - } - - #[test] - fn into_iter_drop() { - use std::cell::Cell; - - struct DropCounter<'a>(&'a Cell); - - impl<'a> Drop for DropCounter<'a> { - fn drop(&mut self) { - self.0.set(self.0.get() + 1); - } - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.into_iter(); - assert_eq!(cell.get(), 1); - } - 
- { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 2); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 3); - } - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - { - let mut it = v.into_iter(); - assert!(it.next().is_some()); - assert!(it.next_back().is_some()); - } - assert_eq!(cell.get(), 3); - } - } - - #[test] - fn test_capacity() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.reserve(1); - assert_eq!(v.capacity(), 2); - assert!(!v.spilled()); - - v.reserve_exact(0x100); - assert!(v.capacity() >= 0x100); - - v.push(0); - v.push(1); - v.push(2); - v.push(3); - - v.shrink_to_fit(); - assert!(v.capacity() < 0x100); - } - - #[test] - fn test_truncate() { - let mut v: SmallVec<[Box; 8]> = SmallVec::new(); - - for x in 0..8 { - v.push(Box::new(x)); - } - v.truncate(4); - - assert_eq!(v.len(), 4); - assert!(!v.spilled()); - - assert_eq!(*v.swap_remove(1), 1); - assert_eq!(*v.remove(1), 3); - v.insert(1, Box::new(3)); - - assert_eq!(&v.iter().map(|v| **v).collect::>(), &[0, 3, 2]); - } - - #[test] - fn test_insert_many() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, [5, 6].iter().cloned()); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); - } - - struct MockHintIter { - x: T, - hint: usize, - } - impl Iterator for MockHintIter { - type Item = T::Item; - fn next(&mut self) -> Option { - self.x.next() - } - fn size_hint(&self) -> (usize, Option) { - (self.hint, None) - } - } - - #[test] - fn test_insert_many_short_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many( - 1, - MockHintIter { - x: [5, 6].iter().cloned(), - hint: 5, - }, - ); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); - } - - #[test] - fn test_insert_many_long_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many( - 1, - MockHintIter { - x: [5, 6].iter().cloned(), - hint: 1, - }, - ); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); - } - - #[test] - // https://github.com/servo/rust-smallvec/issues/96 - fn test_insert_many_panic() { - struct PanicOnDoubleDrop { - dropped: Box, - } - - impl Drop for PanicOnDoubleDrop { - fn drop(&mut self) { - assert!(!*self.dropped, "already dropped"); - *self.dropped = true; - } - } - - struct BadIter; - impl Iterator for BadIter { - type Item = PanicOnDoubleDrop; - fn size_hint(&self) -> (usize, Option) { - (1, None) - } - fn next(&mut self) -> Option { - panic!() - } - } - - // These boxes are leaked on purpose by panicking `insert_many`, - // so we clean them up manually to appease Miri's leak checker. 
- let mut box1 = Box::new(false); - let mut box2 = Box::new(false); - - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = vec![ - PanicOnDoubleDrop { - dropped: unsafe { Box::from_raw(&mut *box1) }, - }, - PanicOnDoubleDrop { - dropped: unsafe { Box::from_raw(&mut *box2) }, - }, - ] - .into(); - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(0, BadIter); - }); - assert!(result.is_err()); - - drop(box1); - drop(box2); - } - - #[test] - #[should_panic] - fn test_invalid_grow() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - v.extend(0..8); - v.grow(5); - } - - #[test] - fn test_insert_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_from_slice(1, &[5, 6]); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); - } - - #[test] - fn test_extend_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.extend_from_slice(&[5, 6]); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 1, 2, 3, 5, 6] - ); - } - - #[test] - #[should_panic] - fn test_drop_panic_smallvec() { - // This test should only panic once, and not double panic, - // which would mean a double drop - struct DropPanic; - - impl Drop for DropPanic { - fn drop(&mut self) { - panic!("drop"); - } - } - - let mut v = SmallVec::<[_; 1]>::new(); - v.push(DropPanic); - } - - #[test] - fn test_eq() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1, 2] - a.push(1); - a.push(2); - // b = [1, 2] - b.push(1); - b.push(2); - // c = [3, 4] - c.push(3); - c.push(4); - - assert!(a == b); - assert!(a != c); - } - - #[test] - fn test_ord() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1] - a.push(1); - // b = [1, 1] - b.push(1); - b.push(1); - // c = [1, 2] - c.push(1); - c.push(2); - - assert!(a < b); - assert!(b > a); - assert!(b < c); - assert!(c > b); - } - - #[test] - fn test_hash() { - use std::collections::hash_map::DefaultHasher; - use std::hash::Hash; - - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2, 11, 12]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - } - - #[test] - fn test_as_ref() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_ref(), [1]); - a.push(2); - assert_eq!(a.as_ref(), [1, 2]); - a.push(3); - assert_eq!(a.as_ref(), [1, 2, 3]); - } - - #[test] - fn test_as_mut() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_mut(), [1]); - a.push(2); - assert_eq!(a.as_mut(), [1, 2]); - a.push(3); - assert_eq!(a.as_mut(), [1, 2, 3]); - a.as_mut()[1] = 4; - assert_eq!(a.as_mut(), [1, 4, 3]); - } - - #[test] - fn test_borrow() { - use std::borrow::Borrow; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow(), [1]); - a.push(2); - assert_eq!(a.borrow(), [1, 2]); - a.push(3); - assert_eq!(a.borrow(), [1, 2, 3]); - } - - #[test] - fn test_borrow_mut() { - use std::borrow::BorrowMut; - - 
let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow_mut(), [1]); - a.push(2); - assert_eq!(a.borrow_mut(), [1, 2]); - a.push(3); - assert_eq!(a.borrow_mut(), [1, 2, 3]); - BorrowMut::<[u32]>::borrow_mut(&mut a)[1] = 4; - assert_eq!(a.borrow_mut(), [1, 4, 3]); - } - - #[test] - fn test_from() { - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1][..])[..], [1]); - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1, 2, 3][..])[..], [1, 2, 3]); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let array = [1]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(array); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let array = [99; 128]; - let small_vec: SmallVec<[u8; 128]> = SmallVec::from(array); - assert_eq!(&*small_vec, vec![99u8; 128].as_slice()); - drop(small_vec); - } - - #[test] - fn test_from_slice() { - assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1][..])[..], [1]); - assert_eq!( - &SmallVec::<[u32; 2]>::from_slice(&[1, 2, 3][..])[..], - [1, 2, 3] - ); - } - - #[test] - fn test_exact_size_iterator() { - let mut vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); - assert_eq!(vec.clone().into_iter().len(), 3); - assert_eq!(vec.drain(..2).len(), 2); - assert_eq!(vec.into_iter().len(), 1); - } - - #[test] - fn test_into_iter_as_slice() { - let vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); - let mut iter = vec.clone().into_iter(); - assert_eq!(iter.as_slice(), &[1, 2, 3]); - assert_eq!(iter.as_mut_slice(), &[1, 2, 3]); - iter.next(); - assert_eq!(iter.as_slice(), &[2, 3]); - assert_eq!(iter.as_mut_slice(), &[2, 3]); - iter.next_back(); - assert_eq!(iter.as_slice(), &[2]); - assert_eq!(iter.as_mut_slice(), &[2]); - } - - #[test] - fn test_into_iter_clone() { - // Test that the cloned iterator yields identical elements and that it owns its own copy - // (i.e. no use after move errors). - let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter(); - let mut clone_iter = iter.clone(); - while let Some(x) = iter.next() { - assert_eq!(x, clone_iter.next().unwrap()); - } - assert_eq!(clone_iter.next(), None); - } - - #[test] - fn test_into_iter_clone_partially_consumed_iterator() { - // Test that the cloned iterator only contains the remaining elements of the original iterator. 
- let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter().skip(1); - let mut clone_iter = iter.clone(); - while let Some(x) = iter.next() { - assert_eq!(x, clone_iter.next().unwrap()); - } - assert_eq!(clone_iter.next(), None); - } - - #[test] - fn test_into_iter_clone_empty_smallvec() { - let mut iter = SmallVec::<[u8; 2]>::new().into_iter(); - let mut clone_iter = iter.clone(); - assert_eq!(iter.next(), None); - assert_eq!(clone_iter.next(), None); - } - - #[test] - fn shrink_to_fit_unspill() { - let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3); - vec.pop(); - assert!(vec.spilled()); - vec.shrink_to_fit(); - assert!(!vec.spilled(), "shrink_to_fit will un-spill if possible"); - } - - #[test] - fn test_into_vec() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_vec(), vec![0, 1]); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.into_vec(), vec![0, 1, 2]); - } - - #[test] - fn test_into_inner() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_inner(), Ok([0, 1])); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..1); - assert_eq!(vec.clone().into_inner(), Err(vec)); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.clone().into_inner(), Err(vec)); - } - - #[test] - fn test_from_vec() { - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let vec = vec![1, 2, 3]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - } - - #[test] - fn test_retain() { - // Test inline data storate - let mut sv: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test spilled data storage - let mut sv: SmallVec<[i32; 3]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test that drop implementations are called for inline. - let one = Rc::new(1); - let mut sv: SmallVec<[Rc; 3]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - - // Test that drop implementations are called for spilled data. 
- let mut sv: SmallVec<[Rc; 1]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - sv.push(Rc::new(2)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - } - - #[test] - fn test_dedup() { - let mut dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 2, 3, 3]); - dupes.dedup(); - assert_eq!(&*dupes, &[1, 2, 3]); - - let mut empty: SmallVec<[i32; 5]> = SmallVec::new(); - empty.dedup(); - assert!(empty.is_empty()); - - let mut all_ones: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 1, 1, 1]); - all_ones.dedup(); - assert_eq!(all_ones.len(), 1); - - let mut no_dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 4, 5]); - no_dupes.dedup(); - assert_eq!(no_dupes.len(), 5); - } - - #[test] - fn test_resize() { - let mut v: SmallVec<[i32; 8]> = SmallVec::new(); - v.push(1); - v.resize(5, 0); - assert_eq!(v[..], [1, 0, 0, 0, 0][..]); - - v.resize(2, -1); - assert_eq!(v[..], [1, 0][..]); - } - - #[cfg(feature = "write")] - #[test] - fn test_write() { - use std::io::Write; - - let data = [1, 2, 3, 4, 5]; - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - let len = small_vec.write(&data[..]).unwrap(); - assert_eq!(len, 5); - assert_eq!(small_vec.as_ref(), data.as_ref()); - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - small_vec.write_all(&data[..]).unwrap(); - assert_eq!(small_vec.as_ref(), data.as_ref()); - } - - #[cfg(feature = "serde")] - extern crate bincode; - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - use self::bincode::{config, deserialize}; - let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new(); - small_vec.push(1); - let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); - small_vec.push(2); - // Spill the vec - small_vec.push(3); - small_vec.push(4); - // Check again after spilling. - let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); - } - - #[test] - fn grow_to_shrink() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(1); - v.push(2); - v.push(3); - assert!(v.spilled()); - v.clear(); - // Shrink to inline. 
- v.grow(2); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 2); - assert_eq!(v.len(), 0); - v.push(4); - assert_eq!(v[..], [4]); - } - - #[test] - fn resumable_extend() { - let s = "a b c"; - // This iterator yields: (Some('a'), None, Some('b'), None, Some('c')), None - let it = s - .chars() - .scan(0, |_, ch| if ch.is_whitespace() { None } else { Some(ch) }); - let mut v: SmallVec<[char; 4]> = SmallVec::new(); - v.extend(it); - assert_eq!(v[..], ['a']); - } - - // #139 - #[test] - fn uninhabited() { - enum Void {} - let _sv = SmallVec::<[Void; 8]>::new(); - } - - #[test] - fn grow_spilled_same_size() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(0); - v.push(1); - v.push(2); - assert!(v.spilled()); - assert_eq!(v.capacity(), 4); - // grow with the same capacity - v.grow(4); - assert_eq!(v.capacity(), 4); - assert_eq!(v[..], [0, 1, 2]); - } - - #[cfg(feature = "const_generics")] - #[test] - fn const_generics() { - let _v = SmallVec::<[i32; 987]>::default(); - } -} diff -Nru cargo-0.44.1/vendor/smallvec/LICENSE-APACHE cargo-0.47.0/vendor/smallvec/LICENSE-APACHE --- cargo-0.44.1/vendor/smallvec/LICENSE-APACHE 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/LICENSE-APACHE 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru cargo-0.44.1/vendor/smallvec/LICENSE-MIT cargo-0.47.0/vendor/smallvec/LICENSE-MIT --- cargo-0.44.1/vendor/smallvec/LICENSE-MIT 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/LICENSE-MIT 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright (c) 2018 The Servo Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru cargo-0.44.1/vendor/smallvec/README.md cargo-0.47.0/vendor/smallvec/README.md --- cargo-0.44.1/vendor/smallvec/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -rust-smallvec -============= - -[Documentation](https://docs.rs/smallvec/) - -[Release notes](https://github.com/servo/rust-smallvec/releases) - -"Small vector" optimization for Rust: store up to a small number of items on the stack - -## Example - -```rust -use smallvec::{SmallVec, smallvec}; - -// This SmallVec can hold up to 4 items on the stack: -let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4]; - -// It will automatically move its contents to the heap if -// contains more than four items: -v.push(5); - -// SmallVec points to a slice, so you can use normal slice -// indexing and other methods to access its contents: -v[0] = v[1] + v[2]; -v.sort(); -``` diff -Nru cargo-0.44.1/vendor/smallvec/scripts/run_miri.sh cargo-0.47.0/vendor/smallvec/scripts/run_miri.sh --- cargo-0.44.1/vendor/smallvec/scripts/run_miri.sh 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/scripts/run_miri.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -#!/usr/bin/bash - -set -ex - -# Clean out our target dir, which may have artifacts compiled by a version of -# rust different from the one we're about to download. -cargo clean - -# Install and run the latest version of nightly where miri built successfully. -# Taken from: https://github.com/rust-lang/miri#running-miri-on-ci - -MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri) -echo "Installing latest nightly with Miri: $MIRI_NIGHTLY" -rustup default "$MIRI_NIGHTLY" - -rustup component add miri -cargo miri setup - -cargo miri test --verbose -cargo miri test --verbose --features union -cargo miri test --verbose --all-features diff -Nru cargo-0.44.1/vendor/smallvec/specialization.rs cargo-0.47.0/vendor/smallvec/specialization.rs --- cargo-0.44.1/vendor/smallvec/specialization.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/smallvec/specialization.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Implementations that require `default fn`. 
- -use super::{Array, SmallVec, SpecFrom}; - -impl<'a, A: Array> SpecFrom for SmallVec -where - A::Item: Clone, -{ - #[inline] - default fn spec_from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() - } -} diff -Nru cargo-0.44.1/vendor/socket2/.cargo-checksum.json cargo-0.47.0/vendor/socket2/.cargo-checksum.json --- cargo-0.44.1/vendor/socket2/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/socket2/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918"} \ No newline at end of file +{"files":{},"package":"b1fa70dc5c8104ec096f4fe7ede7a221d35ae13dcd19ba1ad9a81d2cab9a1c44"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/socket2/Cargo.toml cargo-0.47.0/vendor/socket2/Cargo.toml --- cargo-0.44.1/vendor/socket2/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/socket2/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "socket2" -version = "0.3.12" +version = "0.3.15" authors = ["Alex Crichton "] description = "Utilities for handling networking sockets with a maximal amount of configuration\npossible intended.\n" homepage = "https://github.com/alexcrichton/socket2-rs" diff -Nru cargo-0.44.1/vendor/socket2/src/socket.rs cargo-0.47.0/vendor/socket2/src/socket.rs --- cargo-0.44.1/vendor/socket2/src/socket.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/socket2/src/socket.rs 2020-10-01 21:38:28.000000000 +0000 @@ -285,8 +285,12 @@ /// `recvfrom` call. /// /// [`recv_from`]: #method.recv_from - pub fn recv_from_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result { - self.inner.recv(buf, flags) + pub fn recv_from_with_flags( + &self, + buf: &mut [u8], + flags: i32, + ) -> io::Result<(usize, SockAddr)> { + self.inner.recv_from(buf, flags) } /// Receives data from the socket, without removing it from the queue. @@ -314,7 +318,7 @@ /// `send` call. /// /// [`send`]: #method.send - pub fn send_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result { + pub fn send_with_flags(&self, buf: &[u8], flags: i32) -> io::Result { self.inner.send(buf, flags) } @@ -325,7 +329,7 @@ /// /// [`send`]: #method.send /// [`out_of_band_inline`]: #method.out_of_band_inline - pub fn send_out_of_band(&self, buf: &mut [u8]) -> io::Result { + pub fn send_out_of_band(&self, buf: &[u8]) -> io::Result { self.inner.send(buf, MSG_OOB) } @@ -342,8 +346,8 @@ /// `sendto` call. 
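The socket2 0.3.15 hunk above fixes the flagged send/receive wrappers: `recv_from_with_flags` now returns the peer address and actually calls `recv_from`, the send wrappers take immutable buffers, and `send_to_with_flags` (continued below) gains the destination argument. A hedged sketch of the corrected API over UDP; the constructors (`Domain::ipv4()`, `Type::dgram()`) are assumed from the socket2 0.3 API and are not shown in this diff:

```rust
use std::net::SocketAddr;

use socket2::{Domain, Socket, Type};

fn main() -> std::io::Result<()> {
    // Two UDP sockets on ephemeral localhost ports.
    let any: SocketAddr = "127.0.0.1:0".parse().unwrap();
    let sender = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
    let receiver = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
    sender.bind(&any.into())?;
    receiver.bind(&any.into())?;

    // Corrected signatures: an immutable buffer plus destination on the send
    // side, and (bytes received, peer address) on the receive side.
    let dest = receiver.local_addr()?;
    sender.send_to_with_flags(b"ping", &dest, 0)?;

    let mut buf = [0u8; 16];
    let (n, _peer) = receiver.recv_from_with_flags(&mut buf, 0)?;
    assert_eq!(&buf[..n], b"ping");
    Ok(())
}
```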
/// /// [`send_to`]: #method.send_to - pub fn send_to_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result { - self.inner.recv(buf, flags) + pub fn send_to_with_flags(&self, buf: &[u8], addr: &SockAddr, flags: i32) -> io::Result { + self.inner.send_to(buf, flags, addr) } // ================================================ diff -Nru cargo-0.44.1/vendor/socket2/src/sys/unix.rs cargo-0.47.0/vendor/socket2/src/sys/unix.rs --- cargo-0.44.1/vendor/socket2/src/sys/unix.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/socket2/src/sys/unix.rs 2020-10-01 21:38:28.000000000 +0000 @@ -188,7 +188,7 @@ let fd = cvt(libc::socket(family, ty, protocol))?; let fd = Socket::from_raw_fd(fd); set_cloexec(fd.as_raw_fd())?; - #[cfg(target_os = "macos")] + #[cfg(any(target_os = "macos", target_os = "ios"))] { fd.setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; } @@ -203,7 +203,7 @@ let fds = (Socket::from_raw_fd(fds[0]), Socket::from_raw_fd(fds[1])); set_cloexec(fds.0.as_raw_fd())?; set_cloexec(fds.1.as_raw_fd())?; - #[cfg(target_os = "macos")] + #[cfg(any(target_os = "macos", target_os = "ios"))] { fds.0 .setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; diff -Nru cargo-0.44.1/vendor/syn/benches/rust.rs cargo-0.47.0/vendor/syn/benches/rust.rs --- cargo-0.44.1/vendor/syn/benches/rust.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/benches/rust.rs 2020-10-01 21:38:28.000000000 +0000 @@ -36,7 +36,6 @@ #[cfg(not(syn_only))] mod librustc_parse { - extern crate rustc_ast; extern crate rustc_data_structures; extern crate rustc_errors; extern crate rustc_parse; @@ -59,7 +58,7 @@ } } - rustc_ast::with_globals(Edition::Edition2018, || { + rustc_span::with_session_globals(Edition::Edition2018, || { let cm = Lrc::new(SourceMap::new(FilePathMapping::empty())); let emitter = Box::new(SilentEmitter); let handler = Handler::with_emitter(false, None, emitter); diff -Nru cargo-0.44.1/vendor/syn/.cargo-checksum.json cargo-0.47.0/vendor/syn/.cargo-checksum.json --- cargo-0.44.1/vendor/syn/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"ef781e621ee763a2a40721a8861ec519cb76966aee03bb5d00adb6a31dc1c1de"} \ No newline at end of file +{"files":{},"package":"9c51d92969d209b54a98397e1b91c8ae82d8c87a7bb87df0b29aa2ad81454228"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/syn/Cargo.toml cargo-0.47.0/vendor/syn/Cargo.toml --- cargo-0.44.1/vendor/syn/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "syn" -version = "1.0.27" +version = "1.0.42" authors = ["David Tolnay "] include = ["/benches/**", "/build.rs", "/Cargo.toml", "/LICENSE-APACHE", "/LICENSE-MIT", "/README.md", "/src/**", "/tests/**"] description = "Parser for Rust source code" @@ -38,7 +38,7 @@ name = "file" required-features = ["full", "parsing"] [dependencies.proc-macro2] -version = "1.0.13" +version = "1.0.23" default-features = false [dependencies.quote] diff -Nru cargo-0.44.1/vendor/syn/README.md cargo-0.47.0/vendor/syn/README.md --- cargo-0.44.1/vendor/syn/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -46,10 +46,6 @@ [`syn::DeriveInput`]: https://docs.rs/syn/1.0/syn/struct.DeriveInput.html [parser functions]: https://docs.rs/syn/1.0/syn/parse/index.html -If you get stuck with 
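The socket2 hunks above correct the `*_with_flags` signatures: `send_to_with_flags` and `send_out_of_band` now take `&[u8]`, and `recv_from_with_flags` actually performs a `recvfrom` and reports the peer address. A minimal usage sketch, not taken from the patch, assuming the socket2 0.3 API as vendored here (the UDP round-trip to our own port is purely illustrative):

```rust
use socket2::{Domain, Socket, Type};
use std::net::SocketAddr;

fn main() -> std::io::Result<()> {
    // UDP socket bound to an OS-assigned local port.
    let socket = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
    let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
    socket.bind(&addr.into())?;

    // The corrected signature takes `&[u8]` plus the destination address.
    let local = socket.local_addr()?;
    socket.send_to_with_flags(b"ping", &local, 0)?;

    // The corrected receive path returns the byte count and the sender address.
    let mut buf = [0u8; 16];
    let (n, _peer) = socket.recv_from_with_flags(&mut buf, 0)?;
    assert_eq!(&buf[..n], b"ping");
    Ok(())
}
```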
anything involving procedural macros in Rust I am happy to -provide help even if the issue is not related to Syn. Please file a ticket in -this repo. - *Version requirement: Syn supports rustc 1.31 and up.* [*Release notes*](https://github.com/dtolnay/syn/releases) @@ -88,8 +84,6 @@ ``` ```rust -extern crate proc_macro; - use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; diff -Nru cargo-0.44.1/vendor/syn/src/attr.rs cargo-0.47.0/vendor/syn/src/attr.rs --- cargo-0.44.1/vendor/syn/src/attr.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/attr.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,18 +1,12 @@ use super::*; use crate::punctuated::Punctuated; - -use std::iter; - use proc_macro2::TokenStream; +use std::iter; #[cfg(feature = "parsing")] use crate::parse::{Parse, ParseBuffer, ParseStream, Parser, Result}; #[cfg(feature = "parsing")] use crate::punctuated::Pair; -#[cfg(feature = "extra-traits")] -use crate::tt::TokenStreamHelper; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; ast_struct! { /// An attribute like `#[repr(transparent)]`. @@ -139,8 +133,10 @@ /// let attr: ItemMod = parse_quote! { /// #[doc = r" Single line doc comments"] /// #[doc = r" We write so many!"] - /// #[doc = r" Multi-line comments... - /// May span many lines"] + /// #[doc = r" + /// * Multi-line comments... + /// * May span many lines + /// "] /// mod example { /// #![doc = r" Of course, they can be inner too"] /// #![doc = r" And fit in a single line "] @@ -148,7 +144,7 @@ /// }; /// assert_eq!(doc, attr); /// ``` - pub struct Attribute #manual_extra_traits { + pub struct Attribute { pub pound_token: Token![#], pub style: AttrStyle, pub bracket_token: token::Bracket, @@ -157,34 +153,6 @@ } } -#[cfg(feature = "extra-traits")] -impl Eq for Attribute {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for Attribute { - fn eq(&self, other: &Self) -> bool { - self.style == other.style - && self.pound_token == other.pound_token - && self.bracket_token == other.bracket_token - && self.path == other.path - && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for Attribute { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.style.hash(state); - self.pound_token.hash(state); - self.bracket_token.hash(state); - self.path.hash(state); - TokenStreamHelper(&self.tokens).hash(state); - } -} - impl Attribute { /// Parses the content of the attribute, consisting of the path and tokens, /// as a [`Meta`] if possible. @@ -263,7 +231,7 @@ #[cfg(feature = "parsing")] pub fn parse_outer(input: ParseStream) -> Result> { let mut attrs = Vec::new(); - while input.peek(Token![#]) && !input.peek(token::Group) { + while input.peek(Token![#]) { attrs.push(input.call(parsing::single_parse_outer)?); } Ok(attrs) @@ -276,7 +244,7 @@ #[cfg(feature = "parsing")] pub fn parse_inner(input: ParseStream) -> Result> { let mut attrs = Vec::new(); - while input.peek(Token![#]) && input.peek2(Token![!]) && !input.peek(token::Group) { + while input.peek(Token![#]) && input.peek2(Token![!]) { attrs.push(input.call(parsing::single_parse_inner)?); } Ok(attrs) @@ -353,7 +321,6 @@ /// - `#![feature(proc_macro)]` /// - `//! # Example` /// - `/*! Please file an issue */` - #[cfg_attr(feature = "clone-impls", derive(Copy))] pub enum AttrStyle { Outer, Inner(Token![!]), @@ -470,8 +437,8 @@ /// as type `AttributeArgs`. 
/// /// ``` -/// extern crate proc_macro; -/// +/// # extern crate proc_macro; +/// # /// use proc_macro::TokenStream; /// use syn::{parse_macro_input, AttributeArgs, ItemFn}; /// @@ -505,7 +472,7 @@ fn is_outer(attr: &&Attribute) -> bool { match attr.style { AttrStyle::Outer => true, - _ => false, + AttrStyle::Inner(_) => false, } } self.into_iter().filter(is_outer) @@ -515,7 +482,7 @@ fn is_inner(attr: &&Attribute) -> bool { match attr.style { AttrStyle::Inner(_) => true, - _ => false, + AttrStyle::Outer => false, } } self.into_iter().filter(is_inner) @@ -525,7 +492,6 @@ #[cfg(feature = "parsing")] pub mod parsing { use super::*; - use crate::ext::IdentExt; use crate::parse::{Parse, ParseStream, Result}; #[cfg(feature = "full")] diff -Nru cargo-0.44.1/vendor/syn/src/buffer.rs cargo-0.47.0/vendor/syn/src/buffer.rs --- cargo-0.44.1/vendor/syn/src/buffer.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/buffer.rs 2020-10-01 21:38:28.000000000 +0000 @@ -12,13 +12,11 @@ feature = "proc-macro" ))] use crate::proc_macro as pm; +use crate::Lifetime; use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; - use std::marker::PhantomData; use std::ptr; -use crate::Lifetime; - /// Internal type which is used instead of `TokenTree` to represent a token tree /// within a `TokenBuffer`. enum Entry { @@ -134,7 +132,6 @@ /// stream, and have the same scope. /// /// *This type is available only if Syn is built with the `"parsing"` feature.* -#[derive(Copy, Clone, Eq, PartialEq)] pub struct Cursor<'a> { // The current entry which the `Cursor` is pointing at. ptr: *const Entry, @@ -201,13 +198,13 @@ Cursor::create(self.ptr.offset(1), self.scope) } - /// If the cursor is looking at a `None`-delimited group, move it to look at - /// the first token inside instead. If the group is empty, this will move + /// While the cursor is looking at a `None`-delimited group, move it to look + /// at the first token inside instead. If the group is empty, this will move /// the cursor past the `None`-delimited group. /// /// WARNING: This mutates its argument. 
fn ignore_none(&mut self) { - if let Entry::Group(group, buf) = self.entry() { + while let Entry::Group(group, buf) = self.entry() { if group.delimiter() == Delimiter::None { // NOTE: We call `Cursor::create` here to make sure that // situations where we should immediately exit the span after @@ -215,6 +212,8 @@ unsafe { *self = Cursor::create(&buf.data[0], self.scope); } + } else { + break; } } } @@ -363,6 +362,24 @@ } } +impl<'a> Copy for Cursor<'a> {} + +impl<'a> Clone for Cursor<'a> { + fn clone(&self) -> Self { + *self + } +} + +impl<'a> Eq for Cursor<'a> {} + +impl<'a> PartialEq for Cursor<'a> { + fn eq(&self, other: &Self) -> bool { + let Cursor { ptr, scope, marker } = self; + let _ = marker; + *ptr == other.ptr && *scope == other.scope + } +} + pub(crate) fn same_scope(a: Cursor, b: Cursor) -> bool { a.scope == b.scope } diff -Nru cargo-0.44.1/vendor/syn/src/custom_keyword.rs cargo-0.47.0/vendor/syn/src/custom_keyword.rs --- cargo-0.44.1/vendor/syn/src/custom_keyword.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/custom_keyword.rs 2020-10-01 21:38:28.000000000 +0000 @@ -26,8 +26,8 @@ /// /// - Field access to its span — `let sp = whatever_token.span` /// -/// [Peeking]: parse::ParseBuffer::peek -/// [Parsing]: parse::ParseBuffer::parse +/// [Peeking]: crate::parse::ParseBuffer::peek +/// [Parsing]: crate::parse::ParseBuffer::parse /// [Printing]: quote::ToTokens /// [`Span`]: proc_macro2::Span /// diff -Nru cargo-0.44.1/vendor/syn/src/custom_punctuation.rs cargo-0.47.0/vendor/syn/src/custom_punctuation.rs --- cargo-0.44.1/vendor/syn/src/custom_punctuation.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/custom_punctuation.rs 2020-10-01 21:38:28.000000000 +0000 @@ -22,8 +22,8 @@ /// /// - Field access to its spans — `let spans = lrarrow.spans` /// -/// [Peeking]: parse::ParseBuffer::peek -/// [Parsing]: parse::ParseBuffer::parse +/// [Peeking]: crate::parse::ParseBuffer::peek +/// [Parsing]: crate::parse::ParseBuffer::parse /// [Printing]: quote::ToTokens /// [`Span`]: proc_macro2::Span /// diff -Nru cargo-0.44.1/vendor/syn/src/data.rs cargo-0.47.0/vendor/syn/src/data.rs --- cargo-0.44.1/vendor/syn/src/data.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/data.rs 2020-10-01 21:38:28.000000000 +0000 @@ -236,7 +236,6 @@ #[cfg(feature = "parsing")] pub mod parsing { use super::*; - use crate::ext::IdentExt; use crate::parse::discouraged::Speculative; use crate::parse::{Parse, ParseStream, Result}; @@ -316,6 +315,17 @@ impl Parse for Visibility { fn parse(input: ParseStream) -> Result { + // Recognize an empty None-delimited group, as produced by a $:vis + // matcher that matched no tokens. 
+ if input.peek(token::Group) { + let ahead = input.fork(); + let group = crate::group::parse_group(&ahead)?; + if group.content.is_empty() { + input.advance_to(&ahead); + return Ok(Visibility::Inherited); + } + } + if input.peek(Token![pub]) { Self::parse_pub(input) } else if input.peek(Token![crate]) { @@ -394,12 +404,10 @@ #[cfg(feature = "printing")] mod printing { use super::*; - + use crate::print::TokensOrDefault; use proc_macro2::TokenStream; use quote::{ToTokens, TokenStreamExt}; - use crate::print::TokensOrDefault; - impl ToTokens for Variant { fn to_tokens(&self, tokens: &mut TokenStream) { tokens.append_all(&self.attrs); diff -Nru cargo-0.44.1/vendor/syn/src/derive.rs cargo-0.47.0/vendor/syn/src/derive.rs --- cargo-0.44.1/vendor/syn/src/derive.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/derive.rs 2020-10-01 21:38:28.000000000 +0000 @@ -88,7 +88,6 @@ #[cfg(feature = "parsing")] pub mod parsing { use super::*; - use crate::parse::{Parse, ParseStream, Result}; impl Parse for DeriveInput { @@ -221,12 +220,10 @@ #[cfg(feature = "printing")] mod printing { use super::*; - - use proc_macro2::TokenStream; - use quote::ToTokens; - use crate::attr::FilterAttrs; use crate::print::TokensOrDefault; + use proc_macro2::TokenStream; + use quote::ToTokens; impl ToTokens for DeriveInput { fn to_tokens(&self, tokens: &mut TokenStream) { diff -Nru cargo-0.44.1/vendor/syn/src/error.rs cargo-0.47.0/vendor/syn/src/error.rs --- cargo-0.44.1/vendor/syn/src/error.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/error.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,17 +1,15 @@ -use std::fmt::{self, Debug, Display}; -use std::iter::FromIterator; -use std::slice; -use std::vec; - +#[cfg(feature = "parsing")] +use crate::buffer::Cursor; +use crate::thread::ThreadBound; use proc_macro2::{ Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, }; #[cfg(feature = "printing")] use quote::ToTokens; - -#[cfg(feature = "parsing")] -use crate::buffer::Cursor; -use crate::thread::ThreadBound; +use std::fmt::{self, Debug, Display}; +use std::iter::FromIterator; +use std::slice; +use std::vec; /// The result of a Syn parser. pub type Result = std::result::Result; @@ -31,8 +29,8 @@ /// conversion to `compile_error!` automatically. /// /// ``` -/// extern crate proc_macro; -/// +/// # extern crate proc_macro; +/// # /// use proc_macro::TokenStream; /// use syn::{parse_macro_input, AttributeArgs, ItemFn}; /// @@ -81,7 +79,6 @@ /// # } /// # } /// ``` -#[derive(Clone)] pub struct Error { messages: Vec, } @@ -288,6 +285,14 @@ } } +impl Clone for Error { + fn clone(&self) -> Self { + Error { + messages: self.messages.clone(), + } + } +} + impl Clone for ErrorMessage { fn clone(&self) -> Self { let start = self diff -Nru cargo-0.44.1/vendor/syn/src/expr.rs cargo-0.47.0/vendor/syn/src/expr.rs --- cargo-0.44.1/vendor/syn/src/expr.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/syn/src/expr.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,14 +1,14 @@ use super::*; use crate::punctuated::Punctuated; -#[cfg(feature = "extra-traits")] -use crate::tt::TokenStreamHelper; +#[cfg(feature = "full")] +use crate::reserved::Reserved; use proc_macro2::{Span, TokenStream}; #[cfg(feature = "printing")] use quote::IdentFragment; #[cfg(feature = "printing")] use std::fmt::{self, Display}; use std::hash::{Hash, Hasher}; -#[cfg(all(feature = "parsing", feature = "full"))] +#[cfg(feature = "parsing")] use std::mem; ast_enum_of_structs! 
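The `Visibility` parser change above makes an empty `None`-delimited group, which is what a `$vis:vis` macro matcher produces when it matches no tokens, parse as inherited visibility. A minimal sketch of that case, assuming syn 1.0 with its default features:

```rust
use proc_macro2::{Delimiter, Group, TokenStream, TokenTree};
use syn::Visibility;

fn main() -> syn::Result<()> {
    // Build the token stream a `$vis:vis` matcher hands on when it matched
    // nothing: a single, empty, None-delimited group.
    let empty = TokenTree::Group(Group::new(Delimiter::None, TokenStream::new()));
    let tokens: TokenStream = std::iter::once(empty).collect();

    let vis: Visibility = syn::parse2(tokens)?;
    assert!(matches!(vis, Visibility::Inherited));
    Ok(())
}
```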
{ @@ -86,7 +86,7 @@ /// A sign that you may not be choosing the right variable names is if you /// see names getting repeated in your code, like accessing /// `receiver.receiver` or `pat.pat` or `cond.cond`. - pub enum Expr #manual_extra_traits { + pub enum Expr { /// A slice literal expression: `[a, b, c, d]`. Array(ExprArray), @@ -720,232 +720,6 @@ } } -#[cfg(feature = "extra-traits")] -impl Eq for Expr {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for Expr { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Expr::Array(this), Expr::Array(other)) => this == other, - (Expr::Assign(this), Expr::Assign(other)) => this == other, - (Expr::AssignOp(this), Expr::AssignOp(other)) => this == other, - (Expr::Async(this), Expr::Async(other)) => this == other, - (Expr::Await(this), Expr::Await(other)) => this == other, - (Expr::Binary(this), Expr::Binary(other)) => this == other, - (Expr::Block(this), Expr::Block(other)) => this == other, - (Expr::Box(this), Expr::Box(other)) => this == other, - (Expr::Break(this), Expr::Break(other)) => this == other, - (Expr::Call(this), Expr::Call(other)) => this == other, - (Expr::Cast(this), Expr::Cast(other)) => this == other, - (Expr::Closure(this), Expr::Closure(other)) => this == other, - (Expr::Continue(this), Expr::Continue(other)) => this == other, - (Expr::Field(this), Expr::Field(other)) => this == other, - (Expr::ForLoop(this), Expr::ForLoop(other)) => this == other, - (Expr::Group(this), Expr::Group(other)) => this == other, - (Expr::If(this), Expr::If(other)) => this == other, - (Expr::Index(this), Expr::Index(other)) => this == other, - (Expr::Let(this), Expr::Let(other)) => this == other, - (Expr::Lit(this), Expr::Lit(other)) => this == other, - (Expr::Loop(this), Expr::Loop(other)) => this == other, - (Expr::Macro(this), Expr::Macro(other)) => this == other, - (Expr::Match(this), Expr::Match(other)) => this == other, - (Expr::MethodCall(this), Expr::MethodCall(other)) => this == other, - (Expr::Paren(this), Expr::Paren(other)) => this == other, - (Expr::Path(this), Expr::Path(other)) => this == other, - (Expr::Range(this), Expr::Range(other)) => this == other, - (Expr::Reference(this), Expr::Reference(other)) => this == other, - (Expr::Repeat(this), Expr::Repeat(other)) => this == other, - (Expr::Return(this), Expr::Return(other)) => this == other, - (Expr::Struct(this), Expr::Struct(other)) => this == other, - (Expr::Try(this), Expr::Try(other)) => this == other, - (Expr::TryBlock(this), Expr::TryBlock(other)) => this == other, - (Expr::Tuple(this), Expr::Tuple(other)) => this == other, - (Expr::Type(this), Expr::Type(other)) => this == other, - (Expr::Unary(this), Expr::Unary(other)) => this == other, - (Expr::Unsafe(this), Expr::Unsafe(other)) => this == other, - (Expr::Verbatim(this), Expr::Verbatim(other)) => { - TokenStreamHelper(this) == TokenStreamHelper(other) - } - (Expr::While(this), Expr::While(other)) => this == other, - (Expr::Yield(this), Expr::Yield(other)) => this == other, - _ => false, - } - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for Expr { - fn hash(&self, hash: &mut H) - where - H: Hasher, - { - match self { - Expr::Array(expr) => { - hash.write_u8(0); - expr.hash(hash); - } - Expr::Assign(expr) => { - hash.write_u8(1); - expr.hash(hash); - } - Expr::AssignOp(expr) => { - hash.write_u8(2); - expr.hash(hash); - } - Expr::Async(expr) => { - hash.write_u8(3); - expr.hash(hash); - } - Expr::Await(expr) => { - hash.write_u8(4); - expr.hash(hash); - } - Expr::Binary(expr) => { - 
hash.write_u8(5); - expr.hash(hash); - } - Expr::Block(expr) => { - hash.write_u8(6); - expr.hash(hash); - } - Expr::Box(expr) => { - hash.write_u8(7); - expr.hash(hash); - } - Expr::Break(expr) => { - hash.write_u8(8); - expr.hash(hash); - } - Expr::Call(expr) => { - hash.write_u8(9); - expr.hash(hash); - } - Expr::Cast(expr) => { - hash.write_u8(10); - expr.hash(hash); - } - Expr::Closure(expr) => { - hash.write_u8(11); - expr.hash(hash); - } - Expr::Continue(expr) => { - hash.write_u8(12); - expr.hash(hash); - } - Expr::Field(expr) => { - hash.write_u8(13); - expr.hash(hash); - } - Expr::ForLoop(expr) => { - hash.write_u8(14); - expr.hash(hash); - } - Expr::Group(expr) => { - hash.write_u8(15); - expr.hash(hash); - } - Expr::If(expr) => { - hash.write_u8(16); - expr.hash(hash); - } - Expr::Index(expr) => { - hash.write_u8(17); - expr.hash(hash); - } - Expr::Let(expr) => { - hash.write_u8(18); - expr.hash(hash); - } - Expr::Lit(expr) => { - hash.write_u8(19); - expr.hash(hash); - } - Expr::Loop(expr) => { - hash.write_u8(20); - expr.hash(hash); - } - Expr::Macro(expr) => { - hash.write_u8(21); - expr.hash(hash); - } - Expr::Match(expr) => { - hash.write_u8(22); - expr.hash(hash); - } - Expr::MethodCall(expr) => { - hash.write_u8(23); - expr.hash(hash); - } - Expr::Paren(expr) => { - hash.write_u8(24); - expr.hash(hash); - } - Expr::Path(expr) => { - hash.write_u8(25); - expr.hash(hash); - } - Expr::Range(expr) => { - hash.write_u8(26); - expr.hash(hash); - } - Expr::Reference(expr) => { - hash.write_u8(27); - expr.hash(hash); - } - Expr::Repeat(expr) => { - hash.write_u8(28); - expr.hash(hash); - } - Expr::Return(expr) => { - hash.write_u8(29); - expr.hash(hash); - } - Expr::Struct(expr) => { - hash.write_u8(30); - expr.hash(hash); - } - Expr::Try(expr) => { - hash.write_u8(31); - expr.hash(hash); - } - Expr::TryBlock(expr) => { - hash.write_u8(32); - expr.hash(hash); - } - Expr::Tuple(expr) => { - hash.write_u8(33); - expr.hash(hash); - } - Expr::Type(expr) => { - hash.write_u8(34); - expr.hash(hash); - } - Expr::Unary(expr) => { - hash.write_u8(35); - expr.hash(hash); - } - Expr::Unsafe(expr) => { - hash.write_u8(36); - expr.hash(hash); - } - Expr::Verbatim(expr) => { - hash.write_u8(37); - TokenStreamHelper(expr).hash(hash); - } - Expr::While(expr) => { - hash.write_u8(38); - expr.hash(hash); - } - Expr::Yield(expr) => { - hash.write_u8(39); - expr.hash(hash); - } - Expr::__Nonexhaustive => unreachable!(), - } - } -} - impl Expr { #[cfg(all(feature = "parsing", feature = "full"))] pub(crate) fn replace_attrs(&mut self, new: Vec) -> Vec { @@ -1001,8 +775,7 @@ /// /// *This type is available only if Syn is built with the `"derive"` or `"full"` /// feature.* - #[derive(Eq, PartialEq, Hash)] - pub enum Member #manual_extra_traits { + pub enum Member { /// A named field like `self.x`. Named(Ident), /// An unnamed field like `self.0`. 
@@ -1010,6 +783,27 @@ } } +impl Eq for Member {} + +impl PartialEq for Member { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Member::Named(this), Member::Named(other)) => this == other, + (Member::Unnamed(this), Member::Unnamed(other)) => this == other, + _ => false, + } + } +} + +impl Hash for Member { + fn hash(&self, state: &mut H) { + match self { + Member::Named(m) => m.hash(state), + Member::Unnamed(m) => m.hash(state), + } + } +} + #[cfg(feature = "printing")] impl IdentFragment for Member { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { @@ -1032,7 +826,7 @@ /// /// *This type is available only if Syn is built with the `"derive"` or `"full"` /// feature.* - pub struct Index #manual_extra_traits { + pub struct Index { pub index: u32, pub span: Span, } @@ -1075,14 +869,6 @@ #[cfg(feature = "full")] ast_struct! { - #[derive(Default)] - pub struct Reserved { - _private: (), - } -} - -#[cfg(feature = "full")] -ast_struct! { /// The `::<>` explicit type parameters passed to a method call: /// `parse::()`. /// @@ -1179,7 +965,6 @@ /// Limit types of a range, inclusive or exclusive. /// /// *This type is available only if Syn is built with the `"full"` feature.* - #[cfg_attr(feature = "clone-impls", derive(Copy))] pub enum RangeLimits { /// Inclusive at the beginning, exclusive at the end. HalfOpen(Token![..]), @@ -1209,10 +994,9 @@ #[cfg(feature = "parsing")] pub(crate) mod parsing { use super::*; - - use crate::parse::discouraged::Speculative; use crate::parse::{Parse, ParseStream, Result}; use crate::path; + use std::cmp::Ordering; crate::custom_keyword!(raw); @@ -1221,10 +1005,8 @@ // // Struct literals are ambiguous in certain positions // https://github.com/rust-lang/rfcs/pull/92 - #[derive(Copy, Clone)] pub struct AllowStruct(bool); - #[derive(Copy, Clone, PartialEq, PartialOrd)] enum Precedence { Any, Assign, @@ -1278,9 +1060,121 @@ } } - #[cfg(feature = "full")] - fn expr_no_struct(input: ParseStream) -> Result { - ambiguous_expr(input, AllowStruct(false)) + impl Expr { + /// An alternative to the primary `Expr::parse` parser (from the + /// [`Parse`] trait) for ambiguous syntactic positions in which a + /// trailing brace should not be taken as part of the expression. + /// + /// Rust grammar has an ambiguity where braces sometimes turn a path + /// expression into a struct initialization and sometimes do not. In the + /// following code, the expression `S {}` is one expression. Presumably + /// there is an empty struct `struct S {}` defined somewhere which it is + /// instantiating. + /// + /// ``` + /// # struct S; + /// # impl std::ops::Deref for S { + /// # type Target = bool; + /// # fn deref(&self) -> &Self::Target { + /// # &true + /// # } + /// # } + /// let _ = *S {}; + /// + /// // parsed by rustc as: `*(S {})` + /// ``` + /// + /// We would want to parse the above using `Expr::parse` after the `=` + /// token. + /// + /// But in the following, `S {}` is *not* a struct init expression. + /// + /// ``` + /// # const S: &bool = &true; + /// if *S {} {} + /// + /// // parsed by rustc as: + /// // + /// // if (*S) { + /// // /* empty block */ + /// // } + /// // { + /// // /* another empty block */ + /// // } + /// ``` + /// + /// For that reason we would want to parse if-conditions using + /// `Expr::parse_without_eager_brace` after the `if` token. Same for + /// similar syntactic positions such as the condition expr after a + /// `while` token or the expr at the top of a `match`. 
+ /// + /// The Rust grammar's choices around which way this ambiguity is + /// resolved at various syntactic positions is fairly arbitrary. Really + /// either parse behavior could work in most positions, and language + /// designers just decide each case based on which is more likely to be + /// what the programmer had in mind most of the time. + /// + /// ``` + /// # struct S; + /// # fn doc() -> S { + /// if return S {} {} + /// # unreachable!() + /// # } + /// + /// // parsed by rustc as: + /// // + /// // if (return (S {})) { + /// // } + /// // + /// // but could equally well have been this other arbitrary choice: + /// // + /// // if (return S) { + /// // } + /// // {} + /// ``` + /// + /// Note the grammar ambiguity on trailing braces is distinct from + /// precedence and is not captured by assigning a precedence level to + /// the braced struct init expr in relation to other operators. This can + /// be illustrated by `return 0..S {}` vs `match 0..S {}`. The former + /// parses as `return (0..(S {}))` implying tighter precedence for + /// struct init than `..`, while the latter parses as `match (0..S) {}` + /// implying tighter precedence for `..` than struct init, a + /// contradiction. + #[cfg(feature = "full")] + pub fn parse_without_eager_brace(input: ParseStream) -> Result { + ambiguous_expr(input, AllowStruct(false)) + } + } + + impl Copy for AllowStruct {} + + impl Clone for AllowStruct { + fn clone(&self) -> Self { + *self + } + } + + impl Copy for Precedence {} + + impl Clone for Precedence { + fn clone(&self) -> Self { + *self + } + } + + impl PartialEq for Precedence { + fn eq(&self, other: &Self) -> bool { + *self as u8 == *other as u8 + } + } + + impl PartialOrd for Precedence { + fn partial_cmp(&self, other: &Self) -> Option { + let this = *self as u8; + let other = *other as u8; + Some(this.cmp(&other)) + } } #[cfg(feature = "full")] @@ -1462,6 +1356,30 @@ parse_expr(input, lhs, allow_struct, Precedence::Any) } + #[cfg(feature = "full")] + fn expr_attrs(input: ParseStream) -> Result> { + let mut attrs = Vec::new(); + loop { + if input.peek(token::Group) { + let ahead = input.fork(); + let group = crate::group::parse_group(&ahead)?; + if !group.content.peek(Token![#]) || group.content.peek2(Token![!]) { + break; + } + let attr = group.content.call(attr::parsing::single_parse_outer)?; + if !group.content.is_empty() { + break; + } + attrs.push(attr); + } else if input.peek(Token![#]) { + attrs.push(input.call(attr::parsing::single_parse_outer)?); + } else { + break; + } + } + Ok(attrs) + } + // // & // &mut @@ -1469,66 +1387,53 @@ #[cfg(feature = "full")] fn unary_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { let begin = input.fork(); - let ahead = input.fork(); - let attrs = ahead.call(Attribute::parse_outer)?; - if ahead.peek(Token![&]) - || ahead.peek(Token![box]) - || ahead.peek(Token![*]) - || ahead.peek(Token![!]) - || ahead.peek(Token![-]) - { - input.advance_to(&ahead); - if input.peek(Token![&]) { - let and_token: Token![&] = input.parse()?; - let raw: Option = if input.peek(raw) - && (input.peek2(Token![mut]) || input.peek2(Token![const])) - { + let attrs = input.call(expr_attrs)?; + if input.peek(Token![&]) { + let and_token: Token![&] = input.parse()?; + let raw: Option = + if input.peek(raw) && (input.peek2(Token![mut]) || input.peek2(Token![const])) { Some(input.parse()?) 
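The newly public `Expr::parse_without_eager_brace` above targets exactly the condition-like positions its documentation describes. A minimal sketch of calling it from a custom `Parse` impl, assuming syn 1.0 with the `full` and `parsing` features; the `IfLike` type is hypothetical:

```rust
use syn::parse::{Parse, ParseStream};
use syn::{Block, Expr, Result, Token};

// An `if`-like construct whose condition must not treat a trailing `{`
// as the start of a struct literal.
#[allow(dead_code)]
struct IfLike {
    if_token: Token![if],
    cond: Expr,
    body: Block,
}

impl Parse for IfLike {
    fn parse(input: ParseStream) -> Result<Self> {
        Ok(IfLike {
            if_token: input.parse()?,
            cond: Expr::parse_without_eager_brace(input)?,
            body: input.parse()?,
        })
    }
}

fn main() -> Result<()> {
    let parsed: IfLike = syn::parse_str("if *S {}")?;
    // `*S` is the condition; the empty braces are the body block.
    assert!(matches!(parsed.cond, Expr::Unary(_)));
    Ok(())
}
```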
} else { None }; - let mutability: Option = input.parse()?; - if raw.is_some() && mutability.is_none() { - input.parse::()?; - } - let expr = Box::new(unary_expr(input, allow_struct)?); - if raw.is_some() { - Ok(Expr::Verbatim(verbatim::between(begin, input))) - } else { - Ok(Expr::Reference(ExprReference { - attrs, - and_token, - raw: Reserved::default(), - mutability, - expr, - })) - } - } else if input.peek(Token![box]) { - Ok(Expr::Box(ExprBox { - attrs, - box_token: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - })) + let mutability: Option = input.parse()?; + if raw.is_some() && mutability.is_none() { + input.parse::()?; + } + let expr = Box::new(unary_expr(input, allow_struct)?); + if raw.is_some() { + Ok(Expr::Verbatim(verbatim::between(begin, input))) } else { - Ok(Expr::Unary(ExprUnary { + Ok(Expr::Reference(ExprReference { attrs, - op: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), + and_token, + raw: Reserved::default(), + mutability, + expr, })) } + } else if input.peek(Token![box]) { + Ok(Expr::Box(ExprBox { + attrs, + box_token: input.parse()?, + expr: Box::new(unary_expr(input, allow_struct)?), + })) + } else if input.peek(Token![*]) || input.peek(Token![!]) || input.peek(Token![-]) { + Ok(Expr::Unary(ExprUnary { + attrs, + op: input.parse()?, + expr: Box::new(unary_expr(input, allow_struct)?), + })) } else { - trailer_expr(input, allow_struct) + trailer_expr(attrs, input, allow_struct) } } #[cfg(not(feature = "full"))] fn unary_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - let ahead = input.fork(); - let attrs = ahead.call(Attribute::parse_outer)?; - if ahead.peek(Token![*]) || ahead.peek(Token![!]) || ahead.peek(Token![-]) { - input.advance_to(&ahead); + if input.peek(Token![*]) || input.peek(Token![!]) || input.peek(Token![-]) { Ok(Expr::Unary(ExprUnary { - attrs, + attrs: Vec::new(), op: input.parse()?, expr: Box::new(unary_expr(input, allow_struct)?), })) @@ -1544,9 +1449,11 @@ // [ ] ... // ? ... #[cfg(feature = "full")] - fn trailer_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - let outer_attrs = input.call(Attribute::parse_outer)?; - + fn trailer_expr( + outer_attrs: Vec, + input: ParseStream, + allow_struct: AllowStruct, + ) -> Result { let atom = atom_expr(input, allow_struct)?; let mut e = trailer_helper(input, atom)?; @@ -1568,18 +1475,26 @@ args: content.parse_terminated(Expr::parse)?, }); } else if input.peek(Token![.]) && !input.peek(Token![..]) { - let dot_token: Token![.] = input.parse()?; + let mut dot_token: Token![.] = input.parse()?; - if input.peek(token::Await) { + let await_token: Option = input.parse()?; + if let Some(await_token) = await_token { e = Expr::Await(ExprAwait { attrs: Vec::new(), base: Box::new(e), dot_token, - await_token: input.parse()?, + await_token, }); continue; } + let float_token: Option = input.parse()?; + if let Some(float_token) = float_token { + if multi_index(&mut e, &mut dot_token, float_token)? { + continue; + } + } + let member: Member = input.parse()?; let turbofish = if member.is_named() && input.peek(Token![::]) { Some(MethodTurbofish { @@ -1665,10 +1580,17 @@ }); } else if input.peek(Token![.]) && !input.peek(Token![..]) && !input.peek2(token::Await) { + let mut dot_token: Token![.] = input.parse()?; + let float_token: Option = input.parse()?; + if let Some(float_token) = float_token { + if multi_index(&mut e, &mut dot_token, float_token)? 
{ + continue; + } + } e = Expr::Field(ExprField { attrs: Vec::new(), base: Box::new(e), - dot_token: input.parse()?, + dot_token, member: input.parse()?, }); } else if input.peek(token::Bracket) { @@ -1691,7 +1613,11 @@ // interactions, as they are fully contained. #[cfg(feature = "full")] fn atom_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - if input.peek(token::Group) && !input.peek2(Token![::]) { + if input.peek(token::Group) + && !input.peek2(Token![::]) + && !input.peek2(Token![!]) + && !input.peek2(token::Brace) + { input.call(expr_group).map(Expr::Group) } else if input.peek(Lit) { input.parse().map(Expr::Lit) @@ -1921,7 +1847,7 @@ #[cfg(feature = "full")] pub(crate) fn expr_early(input: ParseStream) -> Result { - let mut attrs = input.call(Attribute::parse_outer)?; + let mut attrs = input.call(expr_attrs)?; let mut expr = if input.peek(Token![if]) { Expr::If(input.parse()?) } else if input.peek(Token![while]) { @@ -2014,7 +1940,7 @@ let_token: input.parse()?, pat: pat::parsing::multi_pat_with_leading_vert(input)?, eq_token: input.parse()?, - expr: Box::new(input.call(expr_no_struct)?), + expr: Box::new(input.call(Expr::parse_without_eager_brace)?), }) } @@ -2025,7 +1951,7 @@ Ok(ExprIf { attrs, if_token: input.parse()?, - cond: Box::new(input.call(expr_no_struct)?), + cond: Box::new(input.call(Expr::parse_without_eager_brace)?), then_branch: input.parse()?, else_branch: { if input.peek(Token![else]) { @@ -2068,7 +1994,7 @@ let pat = pat::parsing::multi_pat_with_leading_vert(input)?; let in_token: Token![in] = input.parse()?; - let expr: Expr = input.call(expr_no_struct)?; + let expr: Expr = input.call(Expr::parse_without_eager_brace)?; let content; let brace_token = braced!(content in input); @@ -2113,7 +2039,7 @@ fn parse(input: ParseStream) -> Result { let outer_attrs = input.call(Attribute::parse_outer)?; let match_token: Token![match] = input.parse()?; - let expr = expr_no_struct(input)?; + let expr = Expr::parse_without_eager_brace(input)?; let content; let brace_token = braced!(content in input); @@ -2323,7 +2249,7 @@ let outer_attrs = input.call(Attribute::parse_outer)?; let label: Option { + type Target = [A::Item]; + #[inline(always)] + #[must_use] + fn deref(&self) -> &Self::Target { + &self.data.as_slice()[..self.len] + } +} + +impl DerefMut for ArrayVec { + #[inline(always)] + #[must_use] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data.as_slice_mut()[..self.len] + } +} + +impl> Index for ArrayVec { + type Output = >::Output; + #[inline(always)] + #[must_use] + fn index(&self, index: I) -> &Self::Output { + &self.deref()[index] + } +} + +impl> IndexMut for ArrayVec { + #[inline(always)] + #[must_use] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + &mut self.deref_mut()[index] + } +} + +impl ArrayVec { + /// Move all values from `other` into this vec. + /// + /// ## Panics + /// * If the vec overflows its capacity + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 10], 1, 2, 3); + /// let mut av2 = array_vec!([i32; 10], 4, 5, 6); + /// av.append(&mut av2); + /// assert_eq!(av, &[1, 2, 3, 4, 5, 6][..]); + /// assert_eq!(av2, &[][..]); + /// ``` + #[inline] + pub fn append(&mut self, other: &mut Self) { + for item in other.drain(..) { + self.push(item) + } + } + + /// A `*mut` pointer to the backing array. + /// + /// ## Safety + /// + /// This pointer has provenance over the _entire_ backing array. 
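The float-literal handling added to `expr.rs` above (via `multi_index`) lets nested tuple indices such as `pair.0.1` parse, even though the lexer delivers a single float token `0.1` after the dot. A small sketch, assuming syn 1.0 with the `full` feature:

```rust
use syn::Expr;

fn main() -> syn::Result<()> {
    // `0.1` reaches the parser as one float literal; it is split back into
    // two tuple-index field accesses.
    let expr: Expr = syn::parse_str("pair.0.1")?;
    if let Expr::Field(outer) = &expr {
        assert!(matches!(&*outer.base, Expr::Field(_)));
    } else {
        panic!("expected a field access expression");
    }
    Ok(())
}
```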
+ #[inline(always)] + #[must_use] + pub fn as_mut_ptr(&mut self) -> *mut A::Item { + self.data.as_slice_mut().as_mut_ptr() + } + + /// Performs a `deref_mut`, into unique slice form. + #[inline(always)] + #[must_use] + pub fn as_mut_slice(&mut self) -> &mut [A::Item] { + self.deref_mut() + } + + /// A `*const` pointer to the backing array. + /// + /// ## Safety + /// + /// This pointer has provenance over the _entire_ backing array. + #[inline(always)] + #[must_use] + pub fn as_ptr(&self) -> *const A::Item { + self.data.as_slice().as_ptr() + } + + /// Performs a `deref`, into shared slice form. + #[inline(always)] + #[must_use] + pub fn as_slice(&self) -> &[A::Item] { + self.deref() + } + + /// The capacity of the `ArrayVec`. + /// + /// This is fixed based on the array type, but can't yet be made a `const fn` + /// on Stable Rust. + #[inline(always)] + #[must_use] + pub fn capacity(&self) -> usize { + A::CAPACITY + } + + /// Truncates the `ArrayVec` down to length 0. + #[inline(always)] + pub fn clear(&mut self) { + self.truncate(0) + } + + /// Creates a draining iterator that removes the specified range in the vector + /// and yields the removed items. + /// + /// ## Panics + /// * If the start is greater than the end + /// * If the end is past the edge of the vec. + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 4], 1, 2, 3); + /// let av2: ArrayVec<[i32; 4]> = av.drain(1..).collect(); + /// assert_eq!(av.as_slice(), &[1][..]); + /// assert_eq!(av2.as_slice(), &[2, 3][..]); + /// + /// av.drain(..); + /// assert_eq!(av.as_slice(), &[]); + /// ``` + #[inline] + pub fn drain>( + &mut self, + range: R, + ) -> ArrayVecDrain<'_, A> { + use core::ops::Bound; + let start = match range.start_bound() { + Bound::Included(x) => *x, + Bound::Excluded(x) => x + 1, + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(x) => x + 1, + Bound::Excluded(x) => *x, + Bound::Unbounded => self.len, + }; + assert!( + start <= end, + "ArrayVec::drain> Illegal range, {} to {}", + start, + end + ); + assert!( + end <= self.len, + "ArrayVec::drain> Range ends at {} but length is only {}!", + end, + self.len + ); + ArrayVecDrain { + parent: self, + target_start: start, + target_index: start, + target_end: end, + } + } + + /// Clone each element of the slice into this `ArrayVec`. + /// + /// ## Panics + /// * If the `ArrayVec` would overflow, this will panic. + #[inline] + pub fn extend_from_slice(&mut self, sli: &[A::Item]) + where + A::Item: Clone, + { + if sli.is_empty() { + return; + } + + let new_len = self.len + sli.len(); + if new_len > A::CAPACITY { + panic!( + "ArrayVec::extend_from_slice> total length {} exceeds capacity {}!", + new_len, + A::CAPACITY + ) + } + + let target = &mut self.data.as_slice_mut()[self.len..new_len]; + target.clone_from_slice(sli); + self.set_len(new_len); + } + + /// Wraps up an array and uses the given length as the initial length. + /// + /// If you want to simply use the full array, use `from` instead. + /// + /// ## Panics + /// + /// * The length specified must be less than or equal to the capacity of the array. + #[inline] + #[must_use] + #[allow(clippy::match_wild_err_arm)] + pub fn from_array_len(data: A, len: usize) -> Self { + match Self::try_from_array_len(data, len) { + Ok(out) => out, + Err(_) => { + panic!("ArrayVec::from_array_len> length {} exceeds capacity {}!", len, A::CAPACITY) + } + } + } + + /// Inserts an item at the position given, moving all following elements +1 + /// index. 
+ /// + /// ## Panics + /// * If `index` > `len` or + /// * If the capacity is exhausted + /// + /// ## Example + /// ```rust + /// use tinyvec::*; + /// let mut av = array_vec!([i32; 10], 1, 2, 3); + /// av.insert(1, 4); + /// assert_eq!(av.as_slice(), &[1, 4, 2, 3]); + /// av.insert(4, 5); + /// assert_eq!(av.as_slice(), &[1, 4, 2, 3, 5]); + /// ``` + #[inline] + pub fn insert(&mut self, index: usize, item: A::Item) { + if index > self.len { + panic!("ArrayVec::insert> index {} is out of bounds {}", index, self.len); + } + + // Try to push the element. + self.push(item); + // And move it into its place. + self.as_mut_slice()[index..].rotate_right(1); + } + + /// Checks if the length is 0. + #[inline(always)] + #[must_use] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + /// The length of the `ArrayVec` (in elements). + #[inline(always)] + #[must_use] + pub fn len(&self) -> usize { + self.len + } + + /// Makes a new, empty `ArrayVec`. + #[inline(always)] + #[must_use] + pub fn new() -> Self + where + A: Default, + { + Self::default() + } + + /// Remove and return the last element of the vec, if there is one. + /// + /// ## Failure + /// * If the vec is empty you get `None`. + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 10], 1, 2); + /// assert_eq!(av.pop(), Some(2)); + /// assert_eq!(av.pop(), Some(1)); + /// assert_eq!(av.pop(), None); + /// ``` + #[inline] + pub fn pop(&mut self) -> Option { + if self.len > 0 { + self.len -= 1; + let out = take(&mut self.data.as_slice_mut()[self.len]); + Some(out) + } else { + None + } + } + + /// Place an element onto the end of the vec. + /// + /// ## Panics + /// * If the length of the vec would overflow the capacity. + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 2]); + /// assert_eq!(&av[..], []); + /// av.push(1); + /// assert_eq!(&av[..], [1]); + /// av.push(2); + /// assert_eq!(&av[..], [1, 2]); + /// // av.push(3); this would overflow the ArrayVec and panic! + /// ``` + #[inline(always)] + pub fn push(&mut self, val: A::Item) { + if self.len < A::CAPACITY { + replace(&mut self.data.as_slice_mut()[self.len], val); + self.len += 1; + } else { + panic!("ArrayVec::push> capacity overflow!") + } + } + + /// Removes the item at `index`, shifting all others down by one index. + /// + /// Returns the removed element. + /// + /// ## Panics + /// + /// * If the index is out of bounds. + /// + /// ## Example + /// + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 4], 1, 2, 3); + /// assert_eq!(av.remove(1), 2); + /// assert_eq!(&av[..], [1, 3]); + /// ``` + #[inline] + pub fn remove(&mut self, index: usize) -> A::Item { + let targets: &mut [A::Item] = &mut self.deref_mut()[index..]; + let item = replace(&mut targets[0], A::Item::default()); + targets.rotate_left(1); + self.len -= 1; + item + } + + /// Resize the vec to the new length. + /// + /// If it needs to be longer, it's filled with clones of the provided value. + /// If it needs to be shorter, it's truncated. 
+ /// + /// ## Example + /// + /// ```rust + /// # use tinyvec::*; + /// + /// let mut av = array_vec!([&str; 10], "hello"); + /// av.resize(3, "world"); + /// assert_eq!(&av[..], ["hello", "world", "world"]); + /// + /// let mut av = array_vec!([i32; 10], 1, 2, 3, 4); + /// av.resize(2, 0); + /// assert_eq!(&av[..], [1, 2]); + /// ``` + #[inline] + pub fn resize(&mut self, new_len: usize, new_val: A::Item) + where + A::Item: Clone, + { + match new_len.checked_sub(self.len) { + None => self.truncate(new_len), + Some(0) => (), + Some(new_elements) => { + for _ in 1..new_elements { + self.push(new_val.clone()); + } + self.push(new_val); + } + } + } + + /// Resize the vec to the new length. + /// + /// If it needs to be longer, it's filled with repeated calls to the provided + /// function. If it needs to be shorter, it's truncated. + /// + /// ## Example + /// + /// ```rust + /// # use tinyvec::*; + /// + /// let mut av = array_vec!([i32; 10], 1, 2, 3); + /// av.resize_with(5, Default::default); + /// assert_eq!(&av[..], [1, 2, 3, 0, 0]); + /// + /// let mut av = array_vec!([i32; 10]); + /// let mut p = 1; + /// av.resize_with(4, || { p *= 2; p }); + /// assert_eq!(&av[..], [2, 4, 8, 16]); + /// ``` + #[inline] + pub fn resize_with A::Item>( + &mut self, + new_len: usize, + mut f: F, + ) { + match new_len.checked_sub(self.len) { + None => self.truncate(new_len), + Some(new_elements) => { + for _ in 0..new_elements { + self.push(f()); + } + } + } + } + + /// Walk the vec and keep only the elements that pass the predicate given. + /// + /// ## Example + /// + /// ```rust + /// # use tinyvec::*; + /// + /// let mut av = array_vec!([i32; 10], 1, 1, 2, 3, 3, 4); + /// av.retain(|&x| x % 2 == 0); + /// assert_eq!(&av[..], [2, 4]); + /// ``` + #[inline] + pub fn retain bool>(&mut self, mut acceptable: F) { + // Drop guard to contain exactly the remaining elements when the test + // panics. + struct JoinOnDrop<'vec, Item> { + items: &'vec mut [Item], + done_end: usize, + // Start of tail relative to `done_end`. + tail_start: usize, + } + + impl Drop for JoinOnDrop<'_, Item> { + fn drop(&mut self) { + self.items[self.done_end..].rotate_left(self.tail_start); + } + } + + let mut rest = JoinOnDrop { + items: &mut self.data.as_slice_mut()[..self.len], + done_end: 0, + tail_start: 0, + }; + + for idx in 0..self.len { + // Loop start invariant: idx = rest.done_end + rest.tail_start + if !acceptable(&rest.items[idx]) { + let _ = take(&mut rest.items[idx]); + self.len -= 1; + rest.tail_start += 1; + } else { + rest.items.swap(rest.done_end, idx); + rest.done_end += 1; + } + } + } + + /// Forces the length of the vector to `new_len`. + /// + /// ## Panics + /// * If `new_len` is greater than the vec's capacity. + /// + /// ## Safety + /// * This is a fully safe operation! The inactive memory already counts as + /// "initialized" by Rust's rules. + /// * Other than "the memory is initialized" there are no other guarantees + /// regarding what you find in the inactive portion of the vec. + #[inline(always)] + pub fn set_len(&mut self, new_len: usize) { + if new_len > A::CAPACITY { + // Note(Lokathor): Technically we don't have to panic here, and we could + // just let some other call later on trigger a panic on accident when the + // length is wrong. However, it's a lot easier to catch bugs when things + // are more "fail-fast". + panic!("ArrayVec: set_len overflow!") + } else { + self.len = new_len; + } + } + + /// Fill the vector until its capacity has been reached. 
+ /// + /// Successively fills unused space in the spare slice of the vector with + /// elements from the iterator. It then returns the remaining iterator + /// without exhausting it. This also allows appending the head of an + /// infinite iterator. + /// + /// This is an alternative to `Extend::extend` method for cases where the + /// length of the iterator can not be checked. Since this vector can not + /// reallocate to increase its capacity, it is unclear what to do with + /// remaining elements in the iterator and the iterator itself. The + /// interface also provides no way to communicate this to the caller. + /// + /// ## Panics + /// * If the `next` method of the provided iterator panics. + /// + /// ## Example + /// + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 4]); + /// let mut to_inf = av.fill(0..); + /// assert_eq!(&av[..], [0, 1, 2, 3]); + /// assert_eq!(to_inf.next(), Some(4)); + /// ``` + #[inline] + pub fn fill>( + &mut self, + iter: I, + ) -> I::IntoIter { + let mut iter = iter.into_iter(); + for element in iter.by_ref().take(self.capacity() - self.len()) { + self.push(element); + } + iter + } + + /// Splits the collection at the point given. + /// + /// * `[0, at)` stays in this vec + /// * `[at, len)` ends up in the new vec. + /// + /// ## Panics + /// * if at > len + /// + /// ## Example + /// + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 4], 1, 2, 3); + /// let av2 = av.split_off(1); + /// assert_eq!(&av[..], [1]); + /// assert_eq!(&av2[..], [2, 3]); + /// ``` + #[inline] + pub fn split_off(&mut self, at: usize) -> Self + where + Self: Default, + { + // FIXME: should this just use drain into the output? + if at > self.len { + panic!( + "ArrayVec::split_off> at value {} exceeds length of {}", + at, self.len + ); + } + let mut new = Self::default(); + let moves = &mut self.as_mut_slice()[at..]; + let split_len = moves.len(); + let targets = &mut new.data.as_slice_mut()[..split_len]; + moves.swap_with_slice(targets); + new.len = split_len; + self.len = at; + new + } + + /// Remove an element, swapping the end of the vec into its place. + /// + /// ## Panics + /// * If the index is out of bounds. + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([&str; 4], "foo", "bar", "quack", "zap"); + /// + /// assert_eq!(av.swap_remove(1), "bar"); + /// assert_eq!(&av[..], ["foo", "zap", "quack"]); + /// + /// assert_eq!(av.swap_remove(0), "foo"); + /// assert_eq!(&av[..], ["quack", "zap"]); + /// ``` + #[inline] + pub fn swap_remove(&mut self, index: usize) -> A::Item { + assert!( + index < self.len, + "ArrayVec::swap_remove> index {} is out of bounds {}", + index, + self.len + ); + if index == self.len - 1 { + self.pop().unwrap() + } else { + let i = self.pop().unwrap(); + replace(&mut self[index], i) + } + } + + /// Reduces the vec's length to the given value. + /// + /// If the vec is already shorter than the input, nothing happens. + #[inline] + pub fn truncate(&mut self, new_len: usize) { + if needs_drop::() { + while self.len > new_len { + self.pop(); + } + } else { + self.len = self.len.min(new_len); + } + } + + /// Wraps an array, using the given length as the starting length. + /// + /// If you want to use the whole length of the array, you can just use the + /// `From` impl. + /// + /// ## Failure + /// + /// If the given length is greater than the capacity of the array this will + /// error, and you'll get the array back in the `Err`. 
+ #[inline] + pub fn try_from_array_len(data: A, len: usize) -> Result { + if len <= A::CAPACITY { + Ok(Self { data, len }) + } else { + Err(data) + } + } +} + +#[cfg(feature = "grab_spare_slice")] +impl ArrayVec { + /// Obtain the shared slice of the array _after_ the active memory. + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 4]); + /// assert_eq!(av.grab_spare_slice().len(), 4); + /// av.push(10); + /// av.push(11); + /// av.push(12); + /// av.push(13); + /// assert_eq!(av.grab_spare_slice().len(), 0); + /// ``` + #[inline(always)] + pub fn grab_spare_slice(&self) -> &[A::Item] { + &self.data.as_slice()[self.len..] + } + + /// Obtain the mutable slice of the array _after_ the active memory. + /// + /// ## Example + /// ```rust + /// # use tinyvec::*; + /// let mut av = array_vec!([i32; 4]); + /// assert_eq!(av.grab_spare_slice_mut().len(), 4); + /// av.push(10); + /// av.push(11); + /// assert_eq!(av.grab_spare_slice_mut().len(), 2); + /// ``` + #[inline(always)] + pub fn grab_spare_slice_mut(&mut self) -> &mut [A::Item] { + &mut self.data.as_slice_mut()[self.len..] + } +} + +#[cfg(feature = "nightly_slice_partition_dedup")] +impl ArrayVec { + /// De-duplicates the vec contents. + #[inline(always)] + pub fn dedup(&mut self) + where + A::Item: PartialEq, + { + self.dedup_by(|a, b| a == b) + } + + /// De-duplicates the vec according to the predicate given. + #[inline(always)] + pub fn dedup_by(&mut self, same_bucket: F) + where + F: FnMut(&mut A::Item, &mut A::Item) -> bool, + { + let len = { + let (dedup, _) = self.as_mut_slice().partition_dedup_by(same_bucket); + dedup.len() + }; + self.truncate(len); + } + + /// De-duplicates the vec according to the key selector given. + #[inline(always)] + pub fn dedup_by_key(&mut self, mut key: F) + where + F: FnMut(&mut A::Item) -> K, + K: PartialEq, + { + self.dedup_by(|a, b| key(a) == key(b)) + } +} + +/// Draining iterator for `ArrayVecDrain` +/// +/// See [`ArrayVec::drain`](ArrayVec::drain) +pub struct ArrayVecDrain<'p, A: Array> { + parent: &'p mut ArrayVec, + target_start: usize, + target_index: usize, + target_end: usize, +} +impl<'p, A: Array> Iterator for ArrayVecDrain<'p, A> { + type Item = A::Item; + #[inline] + fn next(&mut self) -> Option { + if self.target_index != self.target_end { + let out = take(&mut self.parent[self.target_index]); + self.target_index += 1; + Some(out) + } else { + None + } + } +} +impl<'p, A: Array> FusedIterator for ArrayVecDrain<'p, A> { } +impl<'p, A: Array> Drop for ArrayVecDrain<'p, A> { + #[inline] + fn drop(&mut self) { + // Changed because it was moving `self`, it's also more clear and the std does the same + self.for_each(drop); + // Implementation very similar to [`ArrayVec::remove`](ArrayVec::remove) + let count = self.target_end - self.target_start; + let targets: &mut [A::Item] = &mut self.parent.deref_mut()[self.target_start..]; + targets.rotate_left(count); + self.parent.len -= count; + } +} + +impl AsMut<[A::Item]> for ArrayVec { + #[inline(always)] + #[must_use] + fn as_mut(&mut self) -> &mut [A::Item] { + &mut *self + } +} + +impl AsRef<[A::Item]> for ArrayVec { + #[inline(always)] + #[must_use] + fn as_ref(&self) -> &[A::Item] { + &*self + } +} + +impl Borrow<[A::Item]> for ArrayVec { + #[inline(always)] + #[must_use] + fn borrow(&self) -> &[A::Item] { + &*self + } +} + +impl BorrowMut<[A::Item]> for ArrayVec { + #[inline(always)] + #[must_use] + fn borrow_mut(&mut self) -> &mut [A::Item] { + &mut *self + } +} + +impl Extend for ArrayVec { + 
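`from_array_len` and `try_from_array_len` above differ only in how they report an over-long length: the former panics, the latter hands the array back. A quick sketch using those two constructors:

```rust
use tinyvec::ArrayVec;

fn main() {
    // Use only the first two slots of an already-initialized backing array.
    let av = ArrayVec::from_array_len([1, 2, 0, 0], 2);
    assert_eq!(&av[..], [1, 2]);

    // The fallible variant returns the array in `Err` instead of panicking.
    let too_long = ArrayVec::try_from_array_len([1, 2, 0, 0], 9);
    assert_eq!(too_long, Err([1, 2, 0, 0]));
}
```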
#[inline] + fn extend>(&mut self, iter: T) { + for t in iter { + self.push(t) + } + } +} + +impl From for ArrayVec { + #[inline(always)] + #[must_use] + /// The output has a length equal to the full array. + /// + /// If you want to select a length, use + /// [`from_array_len`](ArrayVec::from_array_len) + fn from(data: A) -> Self { + Self { len: data.as_slice().len(), data } + } +} + +impl FromIterator for ArrayVec { + #[inline] + #[must_use] + fn from_iter>(iter: T) -> Self { + let mut av = Self::default(); + for i in iter { + av.push(i) + } + av + } +} + +/// Iterator for consuming an `ArrayVec` and returning owned elements. +pub struct ArrayVecIterator { + base: usize, + len: usize, + data: A, +} + +impl ArrayVecIterator { + /// Returns the remaining items of this iterator as a slice. + #[inline] + #[must_use] + pub fn as_slice(&self) -> &[A::Item] { + &self.data.as_slice()[self.base..self.len] + } +} +impl FusedIterator for ArrayVecIterator { } +impl Iterator for ArrayVecIterator { + type Item = A::Item; + #[inline] + fn next(&mut self) -> Option { + if self.base < self.len { + let out = take(&mut self.data.as_slice_mut()[self.base]); + self.base += 1; + Some(out) + } else { + None + } + } + #[inline(always)] + #[must_use] + fn size_hint(&self) -> (usize, Option) { + let s = self.len - self.base; + (s, Some(s)) + } + #[inline(always)] + fn count(self) -> usize { + self.len - self.base + } + #[inline] + fn last(mut self) -> Option { + Some(take(&mut self.data.as_slice_mut()[self.len])) + } + #[inline] + fn nth(&mut self, n: usize) -> Option { + let i = self.base + (n - 1); + if i < self.len { + let out = take(&mut self.data.as_slice_mut()[i]); + self.base = i + 1; + Some(out) + } else { + None + } + } +} + +impl Debug for ArrayVecIterator where A::Item: Debug { + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("ArrayVecIterator").field(&self.as_slice()).finish() + } +} + +impl IntoIterator for ArrayVec { + type Item = A::Item; + type IntoIter = ArrayVecIterator; + #[inline(always)] + #[must_use] + fn into_iter(self) -> Self::IntoIter { + ArrayVecIterator { base: 0, len: self.len, data: self.data } + } +} + +impl PartialEq for ArrayVec +where + A::Item: PartialEq, +{ + #[inline] + #[must_use] + fn eq(&self, other: &Self) -> bool { + self.as_slice().eq(other.as_slice()) + } +} +impl Eq for ArrayVec where A::Item: Eq {} + +impl PartialOrd for ArrayVec +where + A::Item: PartialOrd, +{ + #[inline] + #[must_use] + fn partial_cmp(&self, other: &Self) -> Option { + self.as_slice().partial_cmp(other.as_slice()) + } +} +impl Ord for ArrayVec +where + A::Item: Ord, +{ + #[inline] + #[must_use] + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.as_slice().cmp(other.as_slice()) + } +} + +impl PartialEq<&A> for ArrayVec +where + A::Item: PartialEq, +{ + #[inline] + #[must_use] + fn eq(&self, other: &&A) -> bool { + self.as_slice().eq(other.as_slice()) + } +} + +impl PartialEq<&[A::Item]> for ArrayVec +where + A::Item: PartialEq, +{ + #[inline] + #[must_use] + fn eq(&self, other: &&[A::Item]) -> bool { + self.as_slice().eq(*other) + } +} + +impl Hash for ArrayVec +where + A::Item: Hash, +{ + #[inline] + fn hash(&self, state: &mut H) { + self.as_slice().hash(state) + } +} + +#[cfg(feature = "experimental_write_impl")] +impl> core::fmt::Write for ArrayVec +{ + fn write_str(&mut self, s: &str) -> core::fmt::Result { + let my_len = self.len(); + let str_len = s.as_bytes().len(); + if my_len + str_len <= A::CAPACITY { 
+ let remainder = &mut self.data.as_slice_mut()[my_len..]; + let target = &mut remainder[..str_len]; + target.copy_from_slice(s.as_bytes()); + Ok(()) + } else { + Err(core::fmt::Error) + } + } +} + +// // // // // // // // +// Formatting impls +// // // // // // // // + +impl Binary for ArrayVec +where + A::Item: Binary, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Binary::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Debug for ArrayVec +where + A::Item: Debug, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Debug::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Display for ArrayVec +where + A::Item: Display, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Display::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl LowerExp for ArrayVec +where + A::Item: LowerExp, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + LowerExp::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl LowerHex for ArrayVec +where + A::Item: LowerHex, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + LowerHex::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Octal for ArrayVec +where + A::Item: Octal, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Octal::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Pointer for ArrayVec +where + A::Item: Pointer, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Pointer::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl UpperExp for ArrayVec +where + A::Item: UpperExp, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + UpperExp::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl UpperHex for ArrayVec +where + A::Item: UpperHex, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + UpperHex::fmt(elem, f)?; + } + write!(f, "]") + } +} diff -Nru cargo-0.44.1/vendor/tinyvec/src/lib.rs cargo-0.47.0/vendor/tinyvec/src/lib.rs --- cargo-0.44.1/vendor/tinyvec/src/lib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/tinyvec/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,96 @@ +#![no_std] +#![forbid(unsafe_code)] +#![cfg_attr( + feature = "nightly_slice_partition_dedup", + 
feature(slice_partition_dedup) +)] +#![cfg_attr(feature = "nightly_const_generics", allow(incomplete_features))] +#![cfg_attr(feature = "nightly_const_generics", feature(const_generics))] +#![warn(clippy::missing_inline_in_public_items)] +#![warn(clippy::must_use_candidate)] +#![warn(missing_docs)] + +//! Programmers can have a little vec, as a treat. +//! +//! ## What This Is +//! +//! This crate provides 100% safe code alternatives to both +//! [arrayvec](https://docs.rs/arrayvec) and +//! [smallvec](https://docs.rs/smallvec). +//! +//! Being 100% safe means that you have to have some sort of compromise compared +//! to the versions using `unsafe`. In this case, the compromise is that the +//! element type must implement `Default` to be usable in these vecs. However, +//! that still allows you to use [quite a few +//! types](https://doc.rust-lang.org/std/default/trait.Default.html#implementors), +//! so I think that you'll find these vecs useful in many cases. +//! +//! * [`ArrayVec`](ArrayVec) is an array-backed vec-like structure with a fixed +//! capacity. If you try to grow the length past the array's capacity it will +//! error or panic (depending on the method used). +//! * (`alloc` feature) [`TinyVec`](TinyVec) is an enum that's either an +//! "Inline" `ArrayVec` or a "Heap" `Vec`. If it's Inline and you try to grow +//! the `ArrayVec` beyond its array capacity it will quietly transition into +//! Heap mode and then continue the operation. +//! +//! ## Crate Goals +//! +//! 1) The crate is 100% safe code. Not just a safe API, there are also no +//! `unsafe` internals. `#![forbid(unsafe_code)]`. +//! 2) No required dependencies. +//! * We might provide optional dependencies for extra functionality (eg: +//! `serde` compatibility). +//! 3) The intended API is that, _as much as possible_, these types are +//! essentially a "drop-in" replacement for the standard [`Vec`](Vec::) +//! type. +//! * Stable `Vec` methods that the vecs here also have should be the same +//! general signature. +//! * Unstable `Vec` methods are sometimes provided via a crate feature, but +//! if so they also require a Nightly compiler. +//! * Some methods are provided that _are not_ part of the `Vec` type, such +//! as additional constructor methods. In this case, the names are rather +//! long and whimsical in the hopes that they don't clash with any +//! possible future methods of `Vec`. +//! * If, in the future, `Vec` stabilizes a method that clashes with an +//! existing extra method here then we'll simply be forced to release a +//! 2.y.z version. Not the end of the world. +//! * Some methods of `Vec` are simply inappropriate and will not be +//! implemented here. For example, `ArrayVec` cannot possibly implement +//! [`from_raw_parts`](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.from_raw_parts). 
+ +use core::{ + borrow::{Borrow, BorrowMut}, + cmp::PartialEq, + convert::AsMut, + default::Default, + fmt::{ + Binary, Debug, Display, Formatter, LowerExp, LowerHex, Octal, Pointer, + UpperExp, UpperHex, + }, + hash::{Hash, Hasher}, + iter::{Extend, FromIterator, FusedIterator, IntoIterator, Iterator}, + mem::{needs_drop, replace}, + ops::{Deref, DerefMut, Index, IndexMut, RangeBounds}, + slice::SliceIndex, +}; + +#[cfg(feature = "alloc")] +#[doc(hidden)] // re-export for macros +pub extern crate alloc; + +mod array; +pub use array::*; + +mod arrayvec; +pub use arrayvec::*; + +#[cfg(feature = "alloc")] +mod tinyvec; +#[cfg(feature = "alloc")] +pub use crate::tinyvec::*; + +// TODO MSRV(1.40.0): Just call the normal `core::mem::take` +#[inline(always)] +fn take(from: &mut T) -> T { + replace(from, T::default()) +} diff -Nru cargo-0.44.1/vendor/tinyvec/src/tinyvec.rs cargo-0.47.0/vendor/tinyvec/src/tinyvec.rs --- cargo-0.44.1/vendor/tinyvec/src/tinyvec.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/tinyvec/src/tinyvec.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,1081 @@ +#![cfg(feature = "alloc")] + +use super::*; + +use alloc::vec::Vec; + +/// Helper to make a `TinyVec`. +/// +/// You specify the backing array type, and optionally give all the elements you +/// want to initially place into the array. +/// +/// As an unfortunate restriction, the backing array type must support `Default` +/// for it to work with this macro. +/// +/// ```rust +/// use tinyvec::*; +/// +/// // The backing array type can be specified in the macro call +/// let empty_tv = tiny_vec!([u8; 16]); +/// let some_ints = tiny_vec!([i32; 4], 1, 2, 3); +/// let many_ints = tiny_vec!([i32; 4], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); +/// +/// // Or left to inference +/// let empty_tv: TinyVec<[u8; 16]> = tiny_vec!(); +/// let some_ints: TinyVec<[i32; 4]> = tiny_vec!(1, 2, 3); +/// let many_ints: TinyVec<[i32; 4]> = tiny_vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); +/// ``` +#[macro_export] +macro_rules! tiny_vec { + ($array_type:ty) => { + { + let mut tv: $crate::TinyVec<$array_type> = Default::default(); + tv + } + }; + ($array_type:ty, $($elem:expr),* $(,)?) => { + { + // Note(Lokathor): This goofy looking thing will count the number of + // `$elem` entries we were given. We can't spit out the "+1"s on their + // own, we need to use `$elem` in the repetition-expansion somehow. + // However, we also can't assume it's `Copy` data, so we must use `$elem` + // only once "for real" in the expansion as a whole. To achieve this, we + // can `stringify!` each element in an inner block, then have the block + // return a 1. The stringification is a compile time thing, it won't + // actually move any values. + const INVOKED_ELEM_COUNT: usize = 0 $( + { let _ = stringify!($elem); 1 })*; + // If we have more `$elem` than the `CAPACITY` we will simply go directly + // to constructing on the heap. + let av: $crate::TinyVec<$array_type> = $crate::TinyVec::from_either_with_capacity( + INVOKED_ELEM_COUNT, + #[inline(always)] || $crate::array_vec!($array_type, $($elem),*), + #[inline(always)] || vec!($($elem),*)); + av + } + }; + () => { + tiny_vec!(_) + }; + ($($elem:expr),*) => { + tiny_vec!(_, $($elem),*) + }; +} + +/// A vector that starts inline, but can automatically move to the heap. +/// +/// * Requires the `alloc` feature +/// +/// A `TinyVec` is either an Inline([`ArrayVec`](crate::ArrayVec::)) or +/// Heap([`Vec`](https://doc.rust-lang.org/alloc/vec/struct.Vec.html)). 
The +/// interface for the type as a whole is a bunch of methods that just match on +/// the enum variant and then call the same method on the inner vec. +/// +/// ## Construction +/// +/// Because it's an enum, you can construct a `TinyVec` simply by making an +/// `ArrayVec` or `Vec` and then putting it into the enum. +/// +/// There is also a macro +/// +/// ```rust +/// # use tinyvec::*; +/// let empty_tv = tiny_vec!([u8; 16]); +/// let some_ints = tiny_vec!([i32; 4], 1, 2, 3); +/// ``` +#[derive(Clone)] +pub enum TinyVec { + #[allow(missing_docs)] + Inline(ArrayVec), + #[allow(missing_docs)] + Heap(Vec), +} +impl Default for TinyVec { + #[inline] + #[must_use] + fn default() -> Self { + TinyVec::Inline(ArrayVec::default()) + } +} + +impl Deref for TinyVec { + type Target = [A::Item]; + #[inline(always)] + #[must_use] + fn deref(&self) -> &Self::Target { + match self { + TinyVec::Inline(a) => a.deref(), + TinyVec::Heap(v) => v.deref(), + } + } +} + +impl DerefMut for TinyVec { + #[inline(always)] + #[must_use] + fn deref_mut(&mut self) -> &mut Self::Target { + match self { + TinyVec::Inline(a) => a.deref_mut(), + TinyVec::Heap(v) => v.deref_mut(), + } + } +} + +impl> Index for TinyVec { + type Output = >::Output; + #[inline(always)] + #[must_use] + fn index(&self, index: I) -> &Self::Output { + &self.deref()[index] + } +} + +impl> IndexMut for TinyVec { + #[inline(always)] + #[must_use] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + &mut self.deref_mut()[index] + } +} + +impl TinyVec { + /// Moves the content of the TinyVec to the heap, if it's inline. + #[allow(clippy::missing_inline_in_public_items)] + pub fn move_to_the_heap(&mut self) { + match self { + TinyVec::Inline(ref mut arr) => { + let mut v = Vec::with_capacity(A::CAPACITY * 2); + for item in arr.drain(..) { + v.push(item); + } + replace(self, TinyVec::Heap(v)); + } + TinyVec::Heap(_) => (), + } + } +} + +impl TinyVec { + /// Move all values from `other` into this vec. + #[inline] + pub fn append(&mut self, other: &mut Self) { + for item in other.drain(..) { + self.push(item) + } + } + + /// A mutable pointer to the backing array. + /// + /// ## Safety + /// + /// This pointer has provenance over the _entire_ backing array/buffer. + #[inline(always)] + #[must_use] + pub fn as_mut_ptr(&mut self) -> *mut A::Item { + match self { + TinyVec::Inline(a) => a.as_mut_ptr(), + TinyVec::Heap(v) => v.as_mut_ptr(), + } + } + + /// Helper for getting the mut slice. + #[inline(always)] + #[must_use] + pub fn as_mut_slice(&mut self) -> &mut [A::Item] { + self.deref_mut() + } + + /// A const pointer to the backing array. + /// + /// ## Safety + /// + /// This pointer has provenance over the _entire_ backing array/buffer. + #[inline(always)] + #[must_use] + pub fn as_ptr(&self) -> *const A::Item { + match self { + TinyVec::Inline(a) => a.as_ptr(), + TinyVec::Heap(v) => v.as_ptr(), + } + } + + /// Helper for getting the shared slice. + #[inline(always)] + #[must_use] + pub fn as_slice(&self) -> &[A::Item] { + self.deref() + } + + /// The capacity of the `TinyVec`. + /// + /// When not heap allocated this is fixed based on the array type. + /// Otherwise its the result of the underlying Vec::capacity. + #[inline(always)] + #[must_use] + pub fn capacity(&self) -> usize { + match self { + TinyVec::Inline(_) => A::CAPACITY, + TinyVec::Heap(v) => v.capacity(), + } + } + + /// Removes all elements from the vec. + #[inline(always)] + pub fn clear(&mut self) { + self.truncate(0) + } + + /// De-duplicates the vec. 
+ #[cfg(feature = "nightly_slice_partition_dedup")] + #[inline(always)] + pub fn dedup(&mut self) + where + A::Item: PartialEq, + { + self.dedup_by(|a, b| a == b) + } + + /// De-duplicates the vec according to the predicate given. + #[cfg(feature = "nightly_slice_partition_dedup")] + #[inline(always)] + pub fn dedup_by(&mut self, same_bucket: F) + where + F: FnMut(&mut A::Item, &mut A::Item) -> bool, + { + let len = { + let (dedup, _) = self.as_mut_slice().partition_dedup_by(same_bucket); + dedup.len() + }; + self.truncate(len); + } + + /// De-duplicates the vec according to the key selector given. + #[cfg(feature = "nightly_slice_partition_dedup")] + #[inline(always)] + pub fn dedup_by_key(&mut self, mut key: F) + where + F: FnMut(&mut A::Item) -> K, + K: PartialEq, + { + self.dedup_by(|a, b| key(a) == key(b)) + } + + /// Creates a draining iterator that removes the specified range in the vector + /// and yields the removed items. + /// + /// ## Panics + /// * If the start is greater than the end + /// * If the end is past the edge of the vec. + /// + /// ## Example + /// ```rust + /// use tinyvec::*; + /// let mut tv = tiny_vec!([i32; 4], 1, 2, 3); + /// let tv2: TinyVec<[i32; 4]> = tv.drain(1..).collect(); + /// assert_eq!(tv.as_slice(), &[1][..]); + /// assert_eq!(tv2.as_slice(), &[2, 3][..]); + /// + /// tv.drain(..); + /// assert_eq!(tv.as_slice(), &[]); + /// ``` + #[inline] + pub fn drain>( + &mut self, + range: R, + ) -> TinyVecDrain<'_, A> { + use core::ops::Bound; + let start = match range.start_bound() { + Bound::Included(x) => *x, + Bound::Excluded(x) => x + 1, + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(x) => x + 1, + Bound::Excluded(x) => *x, + Bound::Unbounded => self.len(), + }; + assert!( + start <= end, + "TinyVec::drain> Illegal range, {} to {}", + start, + end + ); + assert!( + end <= self.len(), + "TinyVec::drain> Range ends at {} but length is only {}!", + end, + self.len() + ); + TinyVecDrain { + parent: self, + target_index: start, + target_count: end - start, + } + } + + /// Clone each element of the slice into this vec. + #[inline] + pub fn extend_from_slice(&mut self, sli: &[A::Item]) + where + A::Item: Clone, + { + for i in sli { + self.push(i.clone()) + } + } + + /// Wraps up an array and uses the given length as the initial length. + /// + /// Note that the `From` impl for arrays assumes the full length is used. + /// + /// ## Panics + /// + /// The length must be less than or equal to the capacity of the array. + #[inline] + #[must_use] + #[allow(clippy::match_wild_err_arm)] + pub fn from_array_len(data: A, len: usize) -> Self { + match Self::try_from_array_len(data, len) { + Ok(out) => out, + Err(_) => { + panic!("TinyVec: length {} exceeds capacity {}!", len, A::CAPACITY) + } + } + } + + #[inline(always)] + #[doc(hidden)] // Internal implementation details of `tiny_vec!` + pub fn from_either_with_capacity( + cap: usize, + make_array: impl FnOnce() -> ArrayVec, + make_vec: impl FnOnce() -> Vec, + ) -> Self { + if cap <= A::CAPACITY { + TinyVec::Inline(make_array()) + } else { + TinyVec::Heap(make_vec()) + } + } + + /// Inserts an item at the position given, moving all following elements +1 + /// index. 
+ /// + /// ## Panics + /// * If `index` > `len` + /// + /// ## Example + /// ```rust + /// use tinyvec::*; + /// let mut tv = tiny_vec!([i32; 10], 1, 2, 3); + /// tv.insert(1, 4); + /// assert_eq!(tv.as_slice(), &[1, 4, 2, 3]); + /// tv.insert(4, 5); + /// assert_eq!(tv.as_slice(), &[1, 4, 2, 3, 5]); + /// ``` + #[inline] + pub fn insert(&mut self, index: usize, item: A::Item) { + match self { + TinyVec::Inline(a) => { + if a.len() == A::CAPACITY { + self.move_to_the_heap(); + self.insert(index, item) + } else { + a.insert(index, item); + } + } + TinyVec::Heap(v) => v.insert(index, item), + } + } + + /// If the vec is empty. + #[inline(always)] + #[must_use] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The length of the vec (in elements). + #[inline(always)] + #[must_use] + pub fn len(&self) -> usize { + match self { + TinyVec::Inline(a) => a.len(), + TinyVec::Heap(v) => v.len(), + } + } + + /// Makes a new, empty vec. + #[inline(always)] + #[must_use] + pub fn new() -> Self + where + A: Default, + { + Self::default() + } + + /// Remove and return the last element of the vec, if there is one. + /// + /// ## Failure + /// * If the vec is empty you get `None`. + #[inline] + pub fn pop(&mut self) -> Option { + match self { + TinyVec::Inline(a) => a.pop(), + TinyVec::Heap(v) => v.pop(), + } + } + + /// Place an element onto the end of the vec. + /// ## Panics + /// * If the length of the vec would overflow the capacity. + #[inline(always)] + pub fn push(&mut self, val: A::Item) { + match self { + TinyVec::Inline(a) => { + if a.len() == A::CAPACITY { + self.move_to_the_heap(); + self.push(val) + } else { + a.push(val); + } + } + TinyVec::Heap(v) => v.push(val), + } + } + + /// Removes the item at `index`, shifting all others down by one index. + /// + /// Returns the removed element. + /// + /// ## Panics + /// + /// If the index is out of bounds. + /// + /// ## Example + /// + /// ```rust + /// use tinyvec::*; + /// let mut tv = tiny_vec!([i32; 4], 1, 2, 3); + /// assert_eq!(tv.remove(1), 2); + /// assert_eq!(tv.as_slice(), &[1, 3][..]); + /// ``` + #[inline] + pub fn remove(&mut self, index: usize) -> A::Item { + match self { + TinyVec::Inline(a) => a.remove(index), + TinyVec::Heap(v) => v.remove(index), + } + } + + /// Resize the vec to the new length. + /// + /// If it needs to be longer, it's filled with clones of the provided value. + /// If it needs to be shorter, it's truncated. + /// + /// ## Example + /// + /// ```rust + /// use tinyvec::*; + /// + /// let mut tv = tiny_vec!([&str; 10], "hello"); + /// tv.resize(3, "world"); + /// assert_eq!(tv.as_slice(), &["hello", "world", "world"][..]); + /// + /// let mut tv = tiny_vec!([i32; 10], 1, 2, 3, 4); + /// tv.resize(2, 0); + /// assert_eq!(tv.as_slice(), &[1, 2][..]); + /// ``` + #[inline] + pub fn resize(&mut self, new_len: usize, new_val: A::Item) + where + A::Item: Clone, + { + match self { + TinyVec::Inline(a) => { + if new_len > A::CAPACITY { + self.move_to_the_heap(); + self.resize(new_len, new_val); + } else { + a.resize(new_len, new_val); + } + } + TinyVec::Heap(v) => v.resize(new_len, new_val), + } + } + + /// Resize the vec to the new length. + /// + /// If it needs to be longer, it's filled with repeated calls to the provided + /// function. If it needs to be shorter, it's truncated. 
+ /// + /// ## Example + /// + /// ```rust + /// use tinyvec::*; + /// + /// let mut tv = tiny_vec!([i32; 10], 1, 2, 3); + /// tv.resize_with(5, Default::default); + /// assert_eq!(tv.as_slice(), &[1, 2, 3, 0, 0][..]); + /// + /// let mut tv = tiny_vec!([i32; 10]); + /// let mut p = 1; + /// tv.resize_with(4, || { + /// p *= 2; + /// p + /// }); + /// assert_eq!(tv.as_slice(), &[2, 4, 8, 16][..]); + /// ``` + #[inline] + pub fn resize_with A::Item>(&mut self, new_len: usize, f: F) { + match self { + TinyVec::Inline(a) => a.resize_with(new_len, f), + TinyVec::Heap(v) => v.resize_with(new_len, f), + } + } + + /// Walk the vec and keep only the elements that pass the predicate given. + /// + /// ## Example + /// + /// ```rust + /// use tinyvec::*; + /// + /// let mut tv = tiny_vec!([i32; 10], 1, 2, 3, 4); + /// tv.retain(|&x| x % 2 == 0); + /// assert_eq!(tv.as_slice(), &[2, 4][..]); + /// ``` + #[inline] + pub fn retain bool>(&mut self, acceptable: F) { + match self { + TinyVec::Inline(a) => a.retain(acceptable), + TinyVec::Heap(v) => v.retain(acceptable), + } + } + + /// Splits the collection at the point given. + /// + /// * `[0, at)` stays in this vec + /// * `[at, len)` ends up in the new vec. + /// + /// ## Panics + /// * if at > len + /// + /// ## Example + /// + /// ```rust + /// use tinyvec::*; + /// let mut tv = tiny_vec!([i32; 4], 1, 2, 3); + /// let tv2 = tv.split_off(1); + /// assert_eq!(tv.as_slice(), &[1][..]); + /// assert_eq!(tv2.as_slice(), &[2, 3][..]); + /// ``` + #[inline] + pub fn split_off(&mut self, at: usize) -> Self + where + A: Default, + { + match self { + TinyVec::Inline(a) => TinyVec::Inline(a.split_off(at)), + TinyVec::Heap(v) => TinyVec::Heap(v.split_off(at)), + } + } + + /// Remove an element, swapping the end of the vec into its place. + /// + /// ## Panics + /// * If the index is out of bounds. + /// + /// ## Example + /// ```rust + /// use tinyvec::*; + /// let mut tv = tiny_vec!([&str; 4], "foo", "bar", "quack", "zap"); + /// + /// assert_eq!(tv.swap_remove(1), "bar"); + /// assert_eq!(tv.as_slice(), &["foo", "zap", "quack"][..]); + /// + /// assert_eq!(tv.swap_remove(0), "foo"); + /// assert_eq!(tv.as_slice(), &["quack", "zap"][..]); + /// ``` + #[inline] + pub fn swap_remove(&mut self, index: usize) -> A::Item { + match self { + TinyVec::Inline(a) => a.swap_remove(index), + TinyVec::Heap(v) => v.swap_remove(index), + } + } + + /// Reduces the vec's length to the given value. + /// + /// If the vec is already shorter than the input, nothing happens. + #[inline] + pub fn truncate(&mut self, new_len: usize) { + match self { + TinyVec::Inline(a) => a.truncate(new_len), + TinyVec::Heap(v) => v.truncate(new_len), + } + } + + /// Wraps an array, using the given length as the starting length. + /// + /// If you want to use the whole length of the array, you can just use the + /// `From` impl. + /// + /// ## Failure + /// + /// If the given length is greater than the capacity of the array this will + /// error, and you'll get the array back in the `Err`. 
+ #[inline] + pub fn try_from_array_len(data: A, len: usize) -> Result { + let arr = ArrayVec::try_from_array_len(data, len)?; + Ok(TinyVec::Inline(arr)) + } +} + +/// Draining iterator for `TinyVecDrain` +/// +/// See [`TinyVecDrain::drain`](TinyVecDrain::::drain) +pub struct TinyVecDrain<'p, A: Array> { + parent: &'p mut TinyVec, + target_index: usize, + target_count: usize, +} +impl<'p, A: Array> FusedIterator for TinyVecDrain<'p, A> { } +impl<'p, A: Array> Iterator for TinyVecDrain<'p, A> { + type Item = A::Item; + #[inline] + fn next(&mut self) -> Option { + if self.target_count > 0 { + let out = self.parent.remove(self.target_index); + self.target_count -= 1; + Some(out) + } else { + None + } + } +} +impl<'p, A: Array> Drop for TinyVecDrain<'p, A> { + #[inline] + fn drop(&mut self) { + for _ in self {} + } +} + +impl AsMut<[A::Item]> for TinyVec { + #[inline(always)] + #[must_use] + fn as_mut(&mut self) -> &mut [A::Item] { + &mut *self + } +} + +impl AsRef<[A::Item]> for TinyVec { + #[inline(always)] + #[must_use] + fn as_ref(&self) -> &[A::Item] { + &*self + } +} + +impl Borrow<[A::Item]> for TinyVec { + #[inline(always)] + #[must_use] + fn borrow(&self) -> &[A::Item] { + &*self + } +} + +impl BorrowMut<[A::Item]> for TinyVec { + #[inline(always)] + #[must_use] + fn borrow_mut(&mut self) -> &mut [A::Item] { + &mut *self + } +} + +impl Extend for TinyVec { + #[inline] + fn extend>(&mut self, iter: T) { + for t in iter { + self.push(t) + } + } +} + +impl From> for TinyVec { + #[inline(always)] + #[must_use] + fn from(arr: ArrayVec) -> Self { + TinyVec::Inline(arr) + } +} + +impl From for TinyVec { + fn from(array: A) -> Self { + TinyVec::Inline(ArrayVec::from(array)) + } +} + +impl From<&'_ [T]> for TinyVec +where + T: Clone + Default, + A: Array + Default, +{ + #[inline] + #[must_use] + fn from(slice: &[T]) -> Self { + if slice.len() > A::CAPACITY { + TinyVec::Heap(slice.into()) + } else { + let mut arr = ArrayVec::new(); + arr.extend_from_slice(slice); + + TinyVec::Inline(arr) + } + } +} + +impl From<&'_ mut [T]> for TinyVec +where + T: Clone + Default, + A: Array + Default, +{ + #[inline] + #[must_use] + fn from(slice: &mut [T]) -> Self { + Self::from(&*slice) + } +} + +impl FromIterator for TinyVec { + #[inline] + #[must_use] + fn from_iter>(iter: T) -> Self { + let mut av = Self::default(); + for i in iter { + av.push(i) + } + av + } +} + +/// Iterator for consuming an `TinyVec` and returning owned elements. +pub enum TinyVecIterator { + #[allow(missing_docs)] + Inline(ArrayVecIterator), + #[allow(missing_docs)] + Heap(alloc::vec::IntoIter), +} + +impl TinyVecIterator { + /// Returns the remaining items of this iterator as a slice. 
+ #[inline] + #[must_use] + pub fn as_slice(&self) -> &[A::Item] { + match self { + TinyVecIterator::Inline(a) => a.as_slice(), + TinyVecIterator::Heap(v) => v.as_slice(), + } + } +} +impl FusedIterator for TinyVecIterator { } +impl Iterator for TinyVecIterator { + type Item = A::Item; + #[inline] + fn next(&mut self) -> Option { + match self { + TinyVecIterator::Inline(a) => a.next(), + TinyVecIterator::Heap(v) => v.next(), + } + } + #[inline(always)] + #[must_use] + fn size_hint(&self) -> (usize, Option) { + match self { + TinyVecIterator::Inline(a) => a.size_hint(), + TinyVecIterator::Heap(v) => v.size_hint(), + } + } + #[inline(always)] + fn count(self) -> usize { + match self { + TinyVecIterator::Inline(a) => a.count(), + TinyVecIterator::Heap(v) => v.count(), + } + } + #[inline] + fn last(self) -> Option { + match self { + TinyVecIterator::Inline(a) => a.last(), + TinyVecIterator::Heap(v) => v.last(), + } + } + #[inline] + fn nth(&mut self, n: usize) -> Option { + match self { + TinyVecIterator::Inline(a) => a.nth(n), + TinyVecIterator::Heap(v) => v.nth(n), + } + } +} + +impl Debug for TinyVecIterator where A::Item: Debug { + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("TinyVecIterator").field(&self.as_slice()).finish() + } +} + +impl IntoIterator for TinyVec { + type Item = A::Item; + type IntoIter = TinyVecIterator; + #[inline(always)] + #[must_use] + fn into_iter(self) -> Self::IntoIter { + match self { + TinyVec::Inline(a) => TinyVecIterator::Inline(a.into_iter()), + TinyVec::Heap(v) => TinyVecIterator::Heap(v.into_iter()), + } + } +} + +impl<'a, A: Array> IntoIterator for &'a mut TinyVec { + type Item = &'a mut A::Item; + type IntoIter = alloc::slice::IterMut<'a, A::Item>; + #[inline(always)] + #[must_use] + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl<'a, A: Array> IntoIterator for &'a TinyVec { + type Item = &'a A::Item; + type IntoIter = alloc::slice::Iter<'a, A::Item>; + #[inline(always)] + #[must_use] + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl PartialEq for TinyVec +where + A::Item: PartialEq, +{ + #[inline] + #[must_use] + fn eq(&self, other: &Self) -> bool { + self.as_slice().eq(other.as_slice()) + } +} +impl Eq for TinyVec where A::Item: Eq {} + +impl PartialOrd for TinyVec +where + A::Item: PartialOrd, +{ + #[inline] + #[must_use] + fn partial_cmp(&self, other: &Self) -> Option { + self.as_slice().partial_cmp(other.as_slice()) + } +} +impl Ord for TinyVec +where + A::Item: Ord, +{ + #[inline] + #[must_use] + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.as_slice().cmp(other.as_slice()) + } +} + +impl PartialEq<&A> for TinyVec +where + A::Item: PartialEq, +{ + #[inline] + #[must_use] + fn eq(&self, other: &&A) -> bool { + self.as_slice().eq(other.as_slice()) + } +} + +impl PartialEq<&[A::Item]> for TinyVec +where + A::Item: PartialEq, +{ + #[inline] + #[must_use] + fn eq(&self, other: &&[A::Item]) -> bool { + self.as_slice().eq(*other) + } +} + +impl Hash for TinyVec +where + A::Item: Hash, +{ + #[inline] + fn hash(&self, state: &mut H) { + self.as_slice().hash(state) + } +} + +// // // // // // // // +// Formatting impls +// // // // // // // // + +impl Binary for TinyVec +where + A::Item: Binary, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + 
Binary::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Debug for TinyVec +where + A::Item: Debug, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Debug::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Display for TinyVec +where + A::Item: Display, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Display::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl LowerExp for TinyVec +where + A::Item: LowerExp, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + LowerExp::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl LowerHex for TinyVec +where + A::Item: LowerHex, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + LowerHex::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Octal for TinyVec +where + A::Item: Octal, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Octal::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl Pointer for TinyVec +where + A::Item: Pointer, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + Pointer::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl UpperExp for TinyVec +where + A::Item: UpperExp, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + UpperExp::fmt(elem, f)?; + } + write!(f, "]") + } +} + +impl UpperHex for TinyVec +where + A::Item: UpperHex, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "[")?; + for (i, elem) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + UpperHex::fmt(elem, f)?; + } + write!(f, "]") + } +} diff -Nru cargo-0.44.1/vendor/tinyvec/tests/arrayvec.rs cargo-0.47.0/vendor/tinyvec/tests/arrayvec.rs --- cargo-0.44.1/vendor/tinyvec/tests/arrayvec.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/tinyvec/tests/arrayvec.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,178 @@ +#![allow(bad_style)] + +use std::iter::FromIterator; +use tinyvec::*; + +#[test] +fn test_a_vec() { + let mut expected: ArrayVec<[i32; 4]> = Default::default(); + expected.push(1); + expected.push(2); + expected.push(3); + + let actual = array_vec!([i32; 4], 1, 2, 3); + + assert_eq!(expected, actual); +} + +#[test] +fn ArrayVec_push_pop() { + let mut av: ArrayVec<[i32; 4]> = Default::default(); + assert_eq!(av.len(), 0); + assert_eq!(av.pop(), None); + + av.push(10_i32); + assert_eq!(av.len(), 1); + assert_eq!(av[0], 10); + assert_eq!(av.pop(), Some(10)); + assert_eq!(av.len(), 0); + assert_eq!(av.pop(), None); + + 
av.push(10); + av.push(11); + av.push(12); + av.push(13); + assert_eq!(av[0], 10); + assert_eq!(av[1], 11); + assert_eq!(av[2], 12); + assert_eq!(av[3], 13); + assert_eq!(av.len(), 4); + assert_eq!(av.pop(), Some(13)); + assert_eq!(av.len(), 3); + assert_eq!(av.pop(), Some(12)); + assert_eq!(av.len(), 2); + assert_eq!(av.pop(), Some(11)); + assert_eq!(av.len(), 1); + assert_eq!(av.pop(), Some(10)); + assert_eq!(av.len(), 0); + assert_eq!(av.pop(), None); +} + +#[test] +#[should_panic] +fn ArrayVec_push_overflow() { + let mut av: ArrayVec<[i32; 0]> = Default::default(); + av.push(7); +} + +#[test] +fn ArrayVec_formatting() { + // check that we get the comma placement correct + + let mut av: ArrayVec<[i32; 4]> = Default::default(); + assert_eq!(format!("{:?}", av), "[]"); + av.push(10); + assert_eq!(format!("{:?}", av), "[10]"); + av.push(11); + assert_eq!(format!("{:?}", av), "[10, 11]"); + av.push(12); + assert_eq!(format!("{:?}", av), "[10, 11, 12]"); + + // below here just asserts that the impls exist. + + // + let av: ArrayVec<[i32; 4]> = Default::default(); + assert_eq!(format!("{:b}", av), "[]"); + assert_eq!(format!("{:o}", av), "[]"); + assert_eq!(format!("{:x}", av), "[]"); + assert_eq!(format!("{:X}", av), "[]"); + assert_eq!(format!("{}", av), "[]"); + // + let av: ArrayVec<[f32; 4]> = Default::default(); + assert_eq!(format!("{:e}", av), "[]"); + assert_eq!(format!("{:E}", av), "[]"); + // + let av: ArrayVec<[&'static str; 4]> = Default::default(); + assert_eq!(format!("{:p}", av), "[]"); +} + +#[test] +fn ArrayVec_iteration() { + let av = array_vec!([i32; 4], 10, 11, 12, 13); + + let mut i = av.into_iter(); + assert_eq!(i.next(), Some(10)); + assert_eq!(i.next(), Some(11)); + assert_eq!(i.next(), Some(12)); + assert_eq!(i.next(), Some(13)); + assert_eq!(i.next(), None); + + let av = array_vec!([i32; 4], 10, 11, 12, 13); + + let av2: ArrayVec<[i32; 4]> = av.clone().into_iter().collect(); + assert_eq!(av, av2); +} + +#[test] +fn ArrayVec_append() { + let mut av = array_vec!([i32; 8], 1, 2, 3); + let mut av2 = array_vec!([i32; 8], 4, 5, 6); + // + av.append(&mut av2); + assert_eq!(av.as_slice(), &[1_i32, 2, 3, 4, 5, 6]); + assert_eq!(av2.as_slice(), &[]); +} + +#[test] +fn ArrayVec_remove() { + let mut av: ArrayVec<[i32; 10]> = Default::default(); + av.push(1); + av.push(2); + av.push(3); + assert_eq!(av.remove(1), 2); + assert_eq!(&av[..], &[1, 3][..]); +} + +#[test] +#[should_panic] +fn ArrayVec_remove_invalid() { + let mut av: ArrayVec<[i32; 1]> = Default::default(); + av.push(1); + av.remove(1); +} + +#[test] +fn ArrayVec_swap_remove() { + let mut av: ArrayVec<[i32; 10]> = Default::default(); + av.push(1); + av.push(2); + av.push(3); + av.push(4); + assert_eq!(av.swap_remove(3), 4); + assert_eq!(&av[..], &[1, 2, 3][..]); + assert_eq!(av.swap_remove(0), 1); + assert_eq!(&av[..], &[3, 2][..]); + assert_eq!(av.swap_remove(0), 3); + assert_eq!(&av[..], &[2][..]); + assert_eq!(av.swap_remove(0), 2); + assert_eq!(&av[..], &[][..]); +} + +#[test] +fn ArrayVec_drain() { + let mut av: ArrayVec<[i32; 10]> = Default::default(); + av.push(1); + av.push(2); + av.push(3); + + assert_eq!(Vec::from_iter(av.clone().drain(..)), vec![1, 2, 3]); + + assert_eq!(Vec::from_iter(av.clone().drain(..2)), vec![1, 2]); + assert_eq!(Vec::from_iter(av.clone().drain(..3)), vec![1, 2, 3]); + + assert_eq!(Vec::from_iter(av.clone().drain(..=1)), vec![1, 2]); + assert_eq!(Vec::from_iter(av.clone().drain(..=2)), vec![1, 2, 3]); + + assert_eq!(Vec::from_iter(av.clone().drain(0..)), vec![1, 2, 3]); + 
assert_eq!(Vec::from_iter(av.clone().drain(1..)), vec![2, 3]); + + assert_eq!(Vec::from_iter(av.clone().drain(0..2)), vec![1, 2]); + assert_eq!(Vec::from_iter(av.clone().drain(0..3)), vec![1, 2, 3]); + assert_eq!(Vec::from_iter(av.clone().drain(1..2)), vec![2]); + assert_eq!(Vec::from_iter(av.clone().drain(1..3)), vec![2, 3]); + + assert_eq!(Vec::from_iter(av.clone().drain(0..=1)), vec![1, 2]); + assert_eq!(Vec::from_iter(av.clone().drain(0..=2)), vec![1, 2, 3]); + assert_eq!(Vec::from_iter(av.clone().drain(1..=1)), vec![2]); + assert_eq!(Vec::from_iter(av.clone().drain(1..=2)), vec![2, 3]); +} diff -Nru cargo-0.44.1/vendor/tinyvec/tests/tinyvec.rs cargo-0.47.0/vendor/tinyvec/tests/tinyvec.rs --- cargo-0.44.1/vendor/tinyvec/tests/tinyvec.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/tinyvec/tests/tinyvec.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,93 @@ +#![allow(bad_style)] +#![allow(clippy::redundant_clone)] + +use std::iter::FromIterator; +use tinyvec::*; + +#[test] +fn TinyVec_swap_remove() { + let mut tv: TinyVec<[i32; 10]> = Default::default(); + tv.push(1); + tv.push(2); + tv.push(3); + tv.push(4); + assert_eq!(tv.swap_remove(3), 4); + assert_eq!(&tv[..], &[1, 2, 3][..]); + assert_eq!(tv.swap_remove(0), 1); + assert_eq!(&tv[..], &[3, 2][..]); + assert_eq!(tv.swap_remove(0), 3); + assert_eq!(&tv[..], &[2][..]); + assert_eq!(tv.swap_remove(0), 2); + assert_eq!(&tv[..], &[][..]); +} + +#[test] +fn TinyVec_capacity() { + let mut tv: TinyVec<[i32; 1]> = Default::default(); + assert_eq!(tv.capacity(), 1); + tv.move_to_the_heap(); + tv.extend_from_slice(&[1, 2, 3, 4]); + assert_eq!(tv.capacity(), 4); +} + +#[test] +fn TinyVec_drain() { + let mut tv: TinyVec<[i32; 10]> = Default::default(); + tv.push(1); + tv.push(2); + tv.push(3); + + assert_eq!(Vec::from_iter(tv.clone().drain(..)), vec![1, 2, 3]); + + assert_eq!(Vec::from_iter(tv.clone().drain(..2)), vec![1, 2]); + assert_eq!(Vec::from_iter(tv.clone().drain(..3)), vec![1, 2, 3]); + + assert_eq!(Vec::from_iter(tv.clone().drain(..=1)), vec![1, 2]); + assert_eq!(Vec::from_iter(tv.clone().drain(..=2)), vec![1, 2, 3]); + + assert_eq!(Vec::from_iter(tv.clone().drain(0..)), vec![1, 2, 3]); + assert_eq!(Vec::from_iter(tv.clone().drain(1..)), vec![2, 3]); + + assert_eq!(Vec::from_iter(tv.clone().drain(0..2)), vec![1, 2]); + assert_eq!(Vec::from_iter(tv.clone().drain(0..3)), vec![1, 2, 3]); + assert_eq!(Vec::from_iter(tv.clone().drain(1..2)), vec![2]); + assert_eq!(Vec::from_iter(tv.clone().drain(1..3)), vec![2, 3]); + + assert_eq!(Vec::from_iter(tv.clone().drain(0..=1)), vec![1, 2]); + assert_eq!(Vec::from_iter(tv.clone().drain(0..=2)), vec![1, 2, 3]); + assert_eq!(Vec::from_iter(tv.clone().drain(1..=1)), vec![2]); + assert_eq!(Vec::from_iter(tv.clone().drain(1..=2)), vec![2, 3]); +} + +#[test] +fn TinyVec_resize() { + let mut tv: TinyVec<[i32; 10]> = Default::default(); + tv.resize(20, 5); + assert_eq!(&tv[..], &[5; 20]); +} + +#[test] +fn TinyVec_from_slice_impl() { + let bigger_slice: [u8; 11] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let tinyvec: TinyVec<[u8; 10]> = TinyVec::Heap((&bigger_slice[..]).into()); + assert_eq!(TinyVec::from(&bigger_slice[..]), tinyvec); + + let smaller_slice: [u8; 5] = [0, 1, 2, 3, 4]; + let tinyvec: TinyVec<[u8; 10]> = TinyVec::Inline(ArrayVec::from_array_len( + [0, 1, 2, 3, 4, 0, 0, 0, 0, 0], + 5, + )); + assert_eq!(TinyVec::from(&smaller_slice[..]), tinyvec); + + let same_size: [u8; 4] = [0, 1, 2, 3]; + let tinyvec: TinyVec<[u8; 4]> = + TinyVec::Inline(ArrayVec::from_array_len(same_size, 4)); 
+ assert_eq!(TinyVec::from(&same_size[..]), tinyvec); +} + +#[test] +fn TinyVec_from_array() { + let array = [9, 8, 7, 6, 5, 4, 3, 2, 1]; + let tv = TinyVec::from(array); + assert_eq!(&array, &tv[..]); +} diff -Nru cargo-0.44.1/vendor/unicode-normalization/benches/bench.rs cargo-0.47.0/vendor/unicode-normalization/benches/bench.rs --- cargo-0.44.1/vendor/unicode-normalization/benches/bench.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/benches/bench.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ #![feature(test)] -#![feature(iterator_step_by)] -extern crate unicode_normalization; + extern crate test; +extern crate unicode_normalization; use std::fs; use test::Bencher; diff -Nru cargo-0.44.1/vendor/unicode-normalization/.cargo-checksum.json cargo-0.47.0/vendor/unicode-normalization/.cargo-checksum.json --- cargo-0.44.1/vendor/unicode-normalization/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4"} \ No newline at end of file +{"files":{},"package":"6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/unicode-normalization/Cargo.toml cargo-0.47.0/vendor/unicode-normalization/Cargo.toml --- cargo-0.44.1/vendor/unicode-normalization/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "unicode-normalization" -version = "0.1.12" +version = "0.1.13" authors = ["kwantam "] exclude = ["target/*", "Cargo.lock", "scripts/tmp", "*.txt", "tests/*"] description = "This crate provides functions for normalization of\nUnicode strings, including Canonical and Compatible\nDecomposition and Recomposition, as described in\nUnicode Standard Annex #15.\n" @@ -22,5 +23,10 @@ keywords = ["text", "unicode", "normalization", "decomposition", "recomposition"] license = "MIT/Apache-2.0" repository = "https://github.com/unicode-rs/unicode-normalization" -[dependencies.smallvec] -version = "1.1" +[dependencies.tinyvec] +version = "0.3.3" +features = ["alloc"] + +[features] +default = ["std"] +std = [] diff -Nru cargo-0.44.1/vendor/unicode-normalization/README.md cargo-0.47.0/vendor/unicode-normalization/README.md --- cargo-0.44.1/vendor/unicode-normalization/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -31,5 +31,9 @@ ```toml [dependencies] -unicode-normalization = "0.1.8" +unicode-normalization = "0.1.13" ``` + +## `no_std` + `alloc` support + +This crate is completely `no_std` + `alloc` compatible. This can be enabled by disabling the `std` feature, i.e. specifying `default-features = false` for this crate on your `Cargo.toml`. 
diff -Nru cargo-0.44.1/vendor/unicode-normalization/scripts/unicode.py cargo-0.47.0/vendor/unicode-normalization/scripts/unicode.py --- cargo-0.44.1/vendor/unicode-normalization/scripts/unicode.py 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/scripts/unicode.py 2020-10-01 21:38:28.000000000 +0000 @@ -477,8 +477,8 @@ data = UnicodeData() with open("tables.rs", "w", newline = "\n") as out: out.write(PREAMBLE) - out.write("use quick_check::IsNormalized;\n") - out.write("use quick_check::IsNormalized::*;\n") + out.write("use crate::quick_check::IsNormalized;\n") + out.write("use crate::quick_check::IsNormalized::*;\n") out.write("\n") version = "(%s, %s, %s)" % tuple(UNICODE_VERSION.split(".")) diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/decompose.rs cargo-0.47.0/vendor/unicode-normalization/src/decompose.rs --- cargo-0.44.1/vendor/unicode-normalization/src/decompose.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/decompose.rs 2020-10-01 21:38:28.000000000 +0000 @@ -7,10 +7,10 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use smallvec::SmallVec; -use std::fmt::{self, Write}; -use std::iter::Fuse; -use std::ops::Range; +use core::fmt::{self, Write}; +use core::iter::Fuse; +use core::ops::Range; +use tinyvec::TinyVec; #[derive(Clone)] enum DecompositionType { @@ -32,26 +32,26 @@ // 2) "Ready" characters which are sorted and ready to emit on demand; // 3) A "pending" block which stills needs more characters for us to be able // to sort in canonical order and is not safe to emit. - buffer: SmallVec<[(u8, char); 4]>, + buffer: TinyVec<[(u8, char); 4]>, ready: Range, } #[inline] -pub fn new_canonical>(iter: I) -> Decompositions { +pub fn new_canonical>(iter: I) -> Decompositions { Decompositions { kind: self::DecompositionType::Canonical, iter: iter.fuse(), - buffer: SmallVec::new(), + buffer: TinyVec::new(), ready: 0..0, } } #[inline] -pub fn new_compatible>(iter: I) -> Decompositions { +pub fn new_compatible>(iter: I) -> Decompositions { Decompositions { kind: self::DecompositionType::Compatible, iter: iter.fuse(), - buffer: SmallVec::new(), + buffer: TinyVec::new(), ready: 0..0, } } @@ -78,8 +78,8 @@ #[inline] fn reset_buffer(&mut self) { - // Equivalent to `self.buffer.drain(0..self.ready.end)` (if SmallVec - // supported this API) + // Equivalent to `self.buffer.drain(0..self.ready.end)` + // but faster than drain() if the buffer is a SmallVec or TinyVec let pending = self.buffer.len() - self.ready.end; for i in 0..pending { self.buffer[i] = self.buffer[i + self.ready.end]; @@ -99,7 +99,7 @@ } } -impl> Iterator for Decompositions { +impl> Iterator for Decompositions { type Item = char; #[inline] @@ -149,7 +149,7 @@ } } -impl + Clone> fmt::Display for Decompositions { +impl + Clone> fmt::Display for Decompositions { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for c in self.clone() { f.write_char(c)?; diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/lib.rs cargo-0.47.0/vendor/unicode-normalization/src/lib.rs --- cargo-0.44.1/vendor/unicode-normalization/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -38,81 +38,78 @@ //! 
``` #![deny(missing_docs, unsafe_code)] -#![doc(html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png", - html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png")] - -extern crate smallvec; - -pub use tables::UNICODE_VERSION; -pub use decompose::Decompositions; -pub use quick_check::{ +#![doc( + html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png", + html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png" +)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; + +#[cfg(feature = "std")] +extern crate core; + +extern crate tinyvec; + +pub use crate::decompose::Decompositions; +pub use crate::quick_check::{ + is_nfc, is_nfc_quick, is_nfc_stream_safe, is_nfc_stream_safe_quick, is_nfd, is_nfd_quick, + is_nfd_stream_safe, is_nfd_stream_safe_quick, is_nfkc, is_nfkc_quick, is_nfkd, is_nfkd_quick, IsNormalized, - is_nfc, - is_nfc_quick, - is_nfkc, - is_nfkc_quick, - is_nfc_stream_safe, - is_nfc_stream_safe_quick, - is_nfd, - is_nfd_quick, - is_nfkd, - is_nfkd_quick, - is_nfd_stream_safe, - is_nfd_stream_safe_quick, }; -pub use recompose::Recompositions; -pub use stream_safe::StreamSafe; -use std::str::Chars; +pub use crate::recompose::Recompositions; +pub use crate::stream_safe::StreamSafe; +pub use crate::tables::UNICODE_VERSION; +use core::str::Chars; + +mod no_std_prelude; mod decompose; mod lookups; mod normalize; mod perfect_hash; -mod recompose; mod quick_check; +mod recompose; mod stream_safe; + +#[rustfmt::skip] mod tables; -#[cfg(test)] -mod test; #[doc(hidden)] pub mod __test_api; +#[cfg(test)] +mod test; /// Methods for composing and decomposing characters. pub mod char { - pub use normalize::{decompose_canonical, decompose_compatible, compose}; + pub use crate::normalize::{compose, decompose_canonical, decompose_compatible}; - pub use lookups::{canonical_combining_class, is_combining_mark}; + pub use crate::lookups::{canonical_combining_class, is_combining_mark}; } - /// Methods for iterating over strings while applying Unicode normalizations /// as described in /// [Unicode Standard Annex #15](http://www.unicode.org/reports/tr15/). -pub trait UnicodeNormalization> { +pub trait UnicodeNormalization> { /// Returns an iterator over the string in Unicode Normalization Form D /// (canonical decomposition). - #[inline] fn nfd(self) -> Decompositions; /// Returns an iterator over the string in Unicode Normalization Form KD /// (compatibility decomposition). - #[inline] fn nfkd(self) -> Decompositions; /// An Iterator over the string in Unicode Normalization Form C /// (canonical decomposition followed by canonical composition). - #[inline] fn nfc(self) -> Recompositions; /// An Iterator over the string in Unicode Normalization Form KC /// (compatibility decomposition followed by canonical composition). 
- #[inline] fn nfkc(self) -> Recompositions; /// An Iterator over the string with Conjoining Grapheme Joiner characters /// inserted according to the Stream-Safe Text Process (UAX15-D4) - #[inline] fn stream_safe(self) -> StreamSafe; } @@ -143,7 +140,7 @@ } } -impl> UnicodeNormalization for I { +impl> UnicodeNormalization for I { #[inline] fn nfd(self) -> Decompositions { decompose::new_canonical(self) diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/lookups.rs cargo-0.47.0/vendor/unicode-normalization/src/lookups.rs --- cargo-0.44.1/vendor/unicode-normalization/src/lookups.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/lookups.rs 2020-10-01 21:38:28.000000000 +0000 @@ -10,46 +10,81 @@ //! Lookups of unicode properties using minimal perfect hashing. -use perfect_hash::mph_lookup; -use tables::*; +use crate::perfect_hash::mph_lookup; +use crate::tables::*; /// Look up the canonical combining class for a codepoint. -/// +/// /// The value returned is as defined in the Unicode Character Database. pub fn canonical_combining_class(c: char) -> u8 { - mph_lookup(c.into(), CANONICAL_COMBINING_CLASS_SALT, CANONICAL_COMBINING_CLASS_KV, - u8_lookup_fk, u8_lookup_fv, 0) + mph_lookup( + c.into(), + CANONICAL_COMBINING_CLASS_SALT, + CANONICAL_COMBINING_CLASS_KV, + u8_lookup_fk, + u8_lookup_fv, + 0, + ) } pub(crate) fn composition_table(c1: char, c2: char) -> Option { if c1 < '\u{10000}' && c2 < '\u{10000}' { - mph_lookup((c1 as u32) << 16 | (c2 as u32), - COMPOSITION_TABLE_SALT, COMPOSITION_TABLE_KV, - pair_lookup_fk, pair_lookup_fv_opt, None) + mph_lookup( + (c1 as u32) << 16 | (c2 as u32), + COMPOSITION_TABLE_SALT, + COMPOSITION_TABLE_KV, + pair_lookup_fk, + pair_lookup_fv_opt, + None, + ) } else { composition_table_astral(c1, c2) } } pub(crate) fn canonical_fully_decomposed(c: char) -> Option<&'static [char]> { - mph_lookup(c.into(), CANONICAL_DECOMPOSED_SALT, CANONICAL_DECOMPOSED_KV, - pair_lookup_fk, pair_lookup_fv_opt, None) + mph_lookup( + c.into(), + CANONICAL_DECOMPOSED_SALT, + CANONICAL_DECOMPOSED_KV, + pair_lookup_fk, + pair_lookup_fv_opt, + None, + ) } pub(crate) fn compatibility_fully_decomposed(c: char) -> Option<&'static [char]> { - mph_lookup(c.into(), COMPATIBILITY_DECOMPOSED_SALT, COMPATIBILITY_DECOMPOSED_KV, - pair_lookup_fk, pair_lookup_fv_opt, None) + mph_lookup( + c.into(), + COMPATIBILITY_DECOMPOSED_SALT, + COMPATIBILITY_DECOMPOSED_KV, + pair_lookup_fk, + pair_lookup_fv_opt, + None, + ) } /// Return whether the given character is a combining mark (`General_Category=Mark`) pub fn is_combining_mark(c: char) -> bool { - mph_lookup(c.into(), COMBINING_MARK_SALT, COMBINING_MARK_KV, - bool_lookup_fk, bool_lookup_fv, false) + mph_lookup( + c.into(), + COMBINING_MARK_SALT, + COMBINING_MARK_KV, + bool_lookup_fk, + bool_lookup_fv, + false, + ) } pub fn stream_safe_trailing_nonstarters(c: char) -> usize { - mph_lookup(c.into(), TRAILING_NONSTARTERS_SALT, TRAILING_NONSTARTERS_KV, - u8_lookup_fk, u8_lookup_fv, 0) as usize + mph_lookup( + c.into(), + TRAILING_NONSTARTERS_SALT, + TRAILING_NONSTARTERS_KV, + u8_lookup_fk, + u8_lookup_fv, + 0, + ) as usize } /// Extract the key in a 24 bit key and 8 bit value packed in a u32. 
diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/normalize.rs cargo-0.47.0/vendor/unicode-normalization/src/normalize.rs --- cargo-0.44.1/vendor/unicode-normalization/src/normalize.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/normalize.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,15 +9,20 @@ // except according to those terms. //! Functions for computing canonical and compatible decompositions for Unicode characters. -use std::char; -use std::ops::FnMut; -use lookups::{canonical_fully_decomposed, composition_table, compatibility_fully_decomposed}; +use crate::lookups::{ + canonical_fully_decomposed, compatibility_fully_decomposed, composition_table, +}; + +use core::{char, ops::FnMut}; /// Compute canonical Unicode decomposition for character. /// See [Unicode Standard Annex #15](http://www.unicode.org/reports/tr15/) /// for more information. #[inline] -pub fn decompose_canonical(c: char, emit_char: F) where F: FnMut(char) { +pub fn decompose_canonical(c: char, emit_char: F) +where + F: FnMut(char), +{ decompose(c, canonical_fully_decomposed, emit_char) } @@ -26,14 +31,16 @@ /// for more information. #[inline] pub fn decompose_compatible(c: char, emit_char: F) { - let decompose_char = |c| compatibility_fully_decomposed(c) - .or_else(|| canonical_fully_decomposed(c)); + let decompose_char = + |c| compatibility_fully_decomposed(c).or_else(|| canonical_fully_decomposed(c)); decompose(c, decompose_char, emit_char) } #[inline] fn decompose(c: char, decompose_char: D, mut emit_char: F) - where D: Fn(char) -> Option<&'static [char]>, F: FnMut(char) +where + D: Fn(char) -> Option<&'static [char]>, + F: FnMut(char), { // 7-bit ASCII never decomposes if c <= '\x7f' { @@ -74,8 +81,8 @@ const L_COUNT: u32 = 19; const V_COUNT: u32 = 21; const T_COUNT: u32 = 28; -const N_COUNT: u32 = (V_COUNT * T_COUNT); -const S_COUNT: u32 = (L_COUNT * N_COUNT); +const N_COUNT: u32 = V_COUNT * T_COUNT; +const S_COUNT: u32 = L_COUNT * N_COUNT; const S_LAST: u32 = S_BASE + S_COUNT - 1; const L_LAST: u32 = L_BASE + L_COUNT - 1; @@ -93,7 +100,10 @@ // Decompose a precomposed Hangul syllable #[allow(unsafe_code)] #[inline(always)] -fn decompose_hangul(s: char, mut emit_char: F) where F: FnMut(char) { +fn decompose_hangul(s: char, mut emit_char: F) +where + F: FnMut(char), +{ let s_index = s as u32 - S_BASE; let l_index = s_index / N_COUNT; unsafe { @@ -113,7 +123,11 @@ pub(crate) fn hangul_decomposition_length(s: char) -> usize { let si = s as u32 - S_BASE; let ti = si % T_COUNT; - if ti > 0 { 3 } else { 2 } + if ti > 0 { + 3 + } else { + 2 + } } // Compose a pair of Hangul Jamo @@ -124,17 +138,17 @@ let (a, b) = (a as u32, b as u32); match (a, b) { // Compose a leading consonant and a vowel together into an LV_Syllable - (L_BASE ... L_LAST, V_BASE ... V_LAST) => { + (L_BASE...L_LAST, V_BASE...V_LAST) => { let l_index = a - L_BASE; let v_index = b - V_BASE; let lv_index = l_index * N_COUNT + v_index * T_COUNT; let s = S_BASE + lv_index; - Some(unsafe {char::from_u32_unchecked(s)}) - }, + Some(unsafe { char::from_u32_unchecked(s) }) + } // Compose an LV_Syllable and a trailing consonant into an LVT_Syllable - (S_BASE ... S_LAST, T_FIRST ... 
T_LAST) if (a - S_BASE) % T_COUNT == 0 => { - Some(unsafe {char::from_u32_unchecked(a + (b - T_BASE))}) - }, + (S_BASE...S_LAST, T_FIRST...T_LAST) if (a - S_BASE) % T_COUNT == 0 => { + Some(unsafe { char::from_u32_unchecked(a + (b - T_BASE)) }) + } _ => None, } } diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/no_std_prelude.rs cargo-0.47.0/vendor/unicode-normalization/src/no_std_prelude.rs --- cargo-0.44.1/vendor/unicode-normalization/src/no_std_prelude.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/no_std_prelude.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,6 @@ +#[cfg(not(feature = "std"))] +pub use alloc::{ + str::Chars, + string::{String, ToString}, + vec::Vec, +}; diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/perfect_hash.rs cargo-0.47.0/vendor/unicode-normalization/src/perfect_hash.rs --- cargo-0.44.1/vendor/unicode-normalization/src/perfect_hash.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/perfect_hash.rs 2020-10-01 21:38:28.000000000 +0000 @@ -20,16 +20,25 @@ } /// Do a lookup using minimal perfect hashing. -/// +/// /// The table is stored as a sequence of "salt" values, then a sequence of /// values that contain packed key/value pairs. The strategy is to hash twice. /// The first hash retrieves a salt value that makes the second hash unique. /// The hash function doesn't have to be very good, just good enough that the /// resulting map is unique. #[inline] -pub(crate) fn mph_lookup(x: u32, salt: &[u16], kv: &[KV], fk: FK, fv: FV, - default: V) -> V - where KV: Copy, FK: Fn(KV) -> u32, FV: Fn(KV) -> V +pub(crate) fn mph_lookup( + x: u32, + salt: &[u16], + kv: &[KV], + fk: FK, + fv: FV, + default: V, +) -> V +where + KV: Copy, + FK: Fn(KV) -> u32, + FV: Fn(KV) -> V, { let s = salt[my_hash(x, 0, salt.len())] as u32; let key_val = kv[my_hash(x, s, salt.len())]; diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/quick_check.rs cargo-0.47.0/vendor/unicode-normalization/src/quick_check.rs --- cargo-0.44.1/vendor/unicode-normalization/src/quick_check.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/quick_check.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,7 +1,7 @@ -use UnicodeNormalization; -use lookups::canonical_combining_class; -use stream_safe; -use tables; +use crate::lookups::canonical_combining_class; +use crate::stream_safe; +use crate::tables; +use crate::UnicodeNormalization; /// The QuickCheck algorithm can quickly determine if a text is or isn't /// normalized without any allocations in many cases, but it has to be able to @@ -19,7 +19,9 @@ // https://unicode.org/reports/tr15/#Detecting_Normalization_Forms #[inline] fn quick_check(s: I, is_allowed: F, stream_safe: bool) -> IsNormalized - where I: Iterator, F: Fn(char) -> IsNormalized +where + I: Iterator, + F: Fn(char) -> IsNormalized, { let mut last_cc = 0u8; let mut nonstarter_count = 0; @@ -42,7 +44,7 @@ IsNormalized::No => return IsNormalized::No, IsNormalized::Maybe => { result = IsNormalized::Maybe; - }, + } } if stream_safe { let decomp = stream_safe::classify_nonstarters(ch); @@ -67,38 +69,37 @@ /// `IsNormalized::Maybe` if further checks are necessary. In this case a check /// like `s.chars().nfc().eq(s.chars())` should suffice. #[inline] -pub fn is_nfc_quick>(s: I) -> IsNormalized { +pub fn is_nfc_quick>(s: I) -> IsNormalized { quick_check(s, tables::qc_nfc, false) } - /// Quickly check if a string is in NFKC. 
#[inline] -pub fn is_nfkc_quick>(s: I) -> IsNormalized { +pub fn is_nfkc_quick>(s: I) -> IsNormalized { quick_check(s, tables::qc_nfkc, false) } /// Quickly check if a string is in NFD. #[inline] -pub fn is_nfd_quick>(s: I) -> IsNormalized { +pub fn is_nfd_quick>(s: I) -> IsNormalized { quick_check(s, tables::qc_nfd, false) } /// Quickly check if a string is in NFKD. #[inline] -pub fn is_nfkd_quick>(s: I) -> IsNormalized { +pub fn is_nfkd_quick>(s: I) -> IsNormalized { quick_check(s, tables::qc_nfkd, false) } /// Quickly check if a string is Stream-Safe NFC. #[inline] -pub fn is_nfc_stream_safe_quick>(s: I) -> IsNormalized { +pub fn is_nfc_stream_safe_quick>(s: I) -> IsNormalized { quick_check(s, tables::qc_nfc, true) } /// Quickly check if a string is Stream-Safe NFD. #[inline] -pub fn is_nfd_stream_safe_quick>(s: I) -> IsNormalized { +pub fn is_nfd_stream_safe_quick>(s: I) -> IsNormalized { quick_check(s, tables::qc_nfd, true) } @@ -164,11 +165,7 @@ #[cfg(test)] mod tests { - use super::{ - IsNormalized, - is_nfc_stream_safe_quick, - is_nfd_stream_safe_quick, - }; + use super::{is_nfc_stream_safe_quick, is_nfd_stream_safe_quick, IsNormalized}; #[test] fn test_stream_safe_nfd() { diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/recompose.rs cargo-0.47.0/vendor/unicode-normalization/src/recompose.rs --- cargo-0.44.1/vendor/unicode-normalization/src/recompose.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/recompose.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,9 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use decompose::Decompositions; -use smallvec::SmallVec; -use std::fmt::{self, Write}; +use crate::decompose::Decompositions; +use core::fmt::{self, Write}; +use tinyvec::TinyVec; #[derive(Clone)] enum RecompositionState { @@ -24,34 +24,34 @@ pub struct Recompositions { iter: Decompositions, state: RecompositionState, - buffer: SmallVec<[char; 4]>, + buffer: TinyVec<[char; 4]>, composee: Option, last_ccc: Option, } #[inline] -pub fn new_canonical>(iter: I) -> Recompositions { +pub fn new_canonical>(iter: I) -> Recompositions { Recompositions { iter: super::decompose::new_canonical(iter), state: self::RecompositionState::Composing, - buffer: SmallVec::new(), + buffer: TinyVec::new(), composee: None, last_ccc: None, } } #[inline] -pub fn new_compatible>(iter: I) -> Recompositions { +pub fn new_compatible>(iter: I) -> Recompositions { Recompositions { iter: super::decompose::new_compatible(iter), state: self::RecompositionState::Composing, - buffer: SmallVec::new(), + buffer: TinyVec::new(), composee: None, last_ccc: None, } } -impl> Iterator for Recompositions { +impl> Iterator for Recompositions { type Item = char; #[inline] @@ -70,26 +70,24 @@ } self.composee = Some(ch); continue; - }, + } Some(k) => k, }; match self.last_ccc { - None => { - match super::char::compose(k, ch) { - Some(r) => { - self.composee = Some(r); - continue; - } - None => { - if ch_class == 0 { - self.composee = Some(ch); - return Some(k); - } - self.buffer.push(ch); - self.last_ccc = Some(ch_class); + None => match super::char::compose(k, ch) { + Some(r) => { + self.composee = Some(r); + continue; + } + None => { + if ch_class == 0 { + self.composee = Some(ch); + return Some(k); } + self.buffer.push(ch); + self.last_ccc = Some(ch_class); } - } + }, Some(l_class) => { if l_class >= ch_class { // `ch` is blocked from `composee` @@ -121,36 +119,32 @@ return self.composee.take(); } } - Purging(next) => { - 
match self.buffer.get(next).cloned() { - None => { - self.buffer.clear(); - self.state = Composing; - } - s => { - self.state = Purging(next + 1); - return s - } + Purging(next) => match self.buffer.get(next).cloned() { + None => { + self.buffer.clear(); + self.state = Composing; } - } - Finished(next) => { - match self.buffer.get(next).cloned() { - None => { - self.buffer.clear(); - return self.composee.take() - } - s => { - self.state = Finished(next + 1); - return s - } + s => { + self.state = Purging(next + 1); + return s; } - } + }, + Finished(next) => match self.buffer.get(next).cloned() { + None => { + self.buffer.clear(); + return self.composee.take(); + } + s => { + self.state = Finished(next + 1); + return s; + } + }, } } } } -impl + Clone> fmt::Display for Recompositions { +impl + Clone> fmt::Display for Recompositions { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for c in self.clone() { f.write_char(c)?; diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/stream_safe.rs cargo-0.47.0/vendor/unicode-normalization/src/stream_safe.rs --- cargo-0.44.1/vendor/unicode-normalization/src/stream_safe.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/stream_safe.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,12 +1,9 @@ -use normalize::{ - hangul_decomposition_length, - is_hangul_syllable, -}; -use lookups::{ +use crate::lookups::{ canonical_combining_class, canonical_fully_decomposed, compatibility_fully_decomposed, stream_safe_trailing_nonstarters, }; -use tables::stream_safe_leading_nonstarters; +use crate::normalize::{hangul_decomposition_length, is_hangul_syllable}; +use crate::tables::stream_safe_leading_nonstarters; pub(crate) const MAX_NONSTARTERS: usize = 30; const COMBINING_GRAPHEME_JOINER: char = '\u{034F}'; @@ -22,11 +19,15 @@ impl StreamSafe { pub(crate) fn new(iter: I) -> Self { - Self { iter, nonstarter_count: 0, buffer: None } + Self { + iter, + nonstarter_count: 0, + buffer: None, + } } } -impl> Iterator for StreamSafe { +impl> Iterator for StreamSafe { type Item = char; #[inline] @@ -72,7 +73,7 @@ leading_nonstarters: 0, trailing_nonstarters: 0, decomposition_len: 1, - } + }; } // Next, special case Hangul, since it's not handled by our tables. 
if is_hangul_syllable(c) { @@ -82,15 +83,12 @@ decomposition_len: hangul_decomposition_length(c), }; } - let decomp = compatibility_fully_decomposed(c) - .or_else(|| canonical_fully_decomposed(c)); + let decomp = compatibility_fully_decomposed(c).or_else(|| canonical_fully_decomposed(c)); match decomp { - Some(decomp) => { - Decomposition { - leading_nonstarters: stream_safe_leading_nonstarters(c), - trailing_nonstarters: stream_safe_trailing_nonstarters(c), - decomposition_len: decomp.len(), - } + Some(decomp) => Decomposition { + leading_nonstarters: stream_safe_leading_nonstarters(c), + trailing_nonstarters: stream_safe_trailing_nonstarters(c), + decomposition_len: decomp.len(), }, None => { let is_nonstarter = canonical_combining_class(c) != 0; @@ -106,13 +104,14 @@ #[cfg(test)] mod tests { - use super::{ - StreamSafe, - classify_nonstarters, - }; - use std::char; - use normalize::decompose_compatible; - use lookups::canonical_combining_class; + use super::{classify_nonstarters, StreamSafe}; + use crate::lookups::canonical_combining_class; + use crate::normalize::decompose_compatible; + + #[cfg(not(feature = "std"))] + use crate::no_std_prelude::*; + + use core::char; fn stream_safe(s: &str) -> String { StreamSafe::new(s.chars()).collect() @@ -136,7 +135,7 @@ None => continue, }; let c = classify_nonstarters(ch); - let mut s = vec![]; + let mut s = Vec::new(); decompose_compatible(ch, |c| s.push(c)); assert_eq!(s.len(), c.decomposition_len); diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/tables.rs cargo-0.47.0/vendor/unicode-normalization/src/tables.rs --- cargo-0.44.1/vendor/unicode-normalization/src/tables.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/tables.rs 2020-10-01 21:38:28.000000000 +0000 @@ -11,8 +11,8 @@ // NOTE: The following code was generated by "scripts/unicode.py", do not edit directly #![allow(missing_docs)] -use quick_check::IsNormalized; -use quick_check::IsNormalized::*; +use crate::quick_check::IsNormalized; +use crate::quick_check::IsNormalized::*; #[allow(unused)] pub const UNICODE_VERSION: (u64, u64, u64) = (9, 0, 0); diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/__test_api.rs cargo-0.47.0/vendor/unicode-normalization/src/__test_api.rs --- cargo-0.44.1/vendor/unicode-normalization/src/__test_api.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/__test_api.rs 2020-10-01 21:38:28.000000000 +0000 @@ -4,10 +4,15 @@ // // If you're caught using this outside this crates tests/, you get to clean up the mess. +#[cfg(not(feature = "std"))] +use crate::no_std_prelude::*; + use crate::stream_safe::StreamSafe; + pub fn stream_safe(s: &str) -> String { - StreamSafe::new(s.chars()).collect() + StreamSafe::new(s.chars()).collect() } + pub mod quick_check { pub use crate::quick_check::*; } diff -Nru cargo-0.44.1/vendor/unicode-normalization/src/test.rs cargo-0.47.0/vendor/unicode-normalization/src/test.rs --- cargo-0.44.1/vendor/unicode-normalization/src/test.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-normalization/src/test.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,11 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
- -use std::char; -use super::UnicodeNormalization; use super::char::is_combining_mark; +use super::UnicodeNormalization; +use core::char; +#[cfg(not(feature = "std"))] +use crate::no_std_prelude::*; #[test] fn test_nfd() { @@ -21,8 +22,11 @@ assert_eq!($input.nfd().to_string(), $expected); // A dummy iterator that is not std::str::Chars directly; // note that `id_func` is used to ensure `Clone` implementation - assert_eq!($input.chars().map(|c| c).nfd().collect::(), $expected); - } + assert_eq!( + $input.chars().map(|c| c).nfd().collect::(), + $expected + ); + }; } t!("abc", "abc"); t!("\u{1e0b}\u{1c4}", "d\u{307}\u{1c4}"); @@ -41,7 +45,7 @@ macro_rules! t { ($input: expr, $expected: expr) => { assert_eq!($input.nfkd().to_string(), $expected); - } + }; } t!("abc", "abc"); t!("\u{1e0b}\u{1c4}", "d\u{307}DZ\u{30c}"); @@ -60,7 +64,7 @@ macro_rules! t { ($input: expr, $expected: expr) => { assert_eq!($input.nfc().to_string(), $expected); - } + }; } t!("abc", "abc"); t!("\u{1e0b}\u{1c4}", "\u{1e0b}\u{1c4}"); @@ -72,7 +76,10 @@ t!("\u{301}a", "\u{301}a"); t!("\u{d4db}", "\u{d4db}"); t!("\u{ac1c}", "\u{ac1c}"); - t!("a\u{300}\u{305}\u{315}\u{5ae}b", "\u{e0}\u{5ae}\u{305}\u{315}b"); + t!( + "a\u{300}\u{305}\u{315}\u{5ae}b", + "\u{e0}\u{5ae}\u{305}\u{315}b" + ); } #[test] @@ -80,7 +87,7 @@ macro_rules! t { ($input: expr, $expected: expr) => { assert_eq!($input.nfkc().to_string(), $expected); - } + }; } t!("abc", "abc"); t!("\u{1e0b}\u{1c4}", "\u{1e0b}D\u{17d}"); @@ -92,7 +99,10 @@ t!("\u{301}a", "\u{301}a"); t!("\u{d4db}", "\u{d4db}"); t!("\u{ac1c}", "\u{ac1c}"); - t!("a\u{300}\u{305}\u{315}\u{5ae}b", "\u{e0}\u{5ae}\u{305}\u{315}b"); + t!( + "a\u{300}\u{305}\u{315}\u{5ae}b", + "\u{e0}\u{5ae}\u{305}\u{315}b" + ); } #[test] diff -Nru cargo-0.44.1/vendor/unicode-width/.cargo-checksum.json cargo-0.47.0/vendor/unicode-width/.cargo-checksum.json --- cargo-0.44.1/vendor/unicode-width/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-width/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479"} \ No newline at end of file +{"files":{},"package":"9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/unicode-width/Cargo.toml cargo-0.47.0/vendor/unicode-width/Cargo.toml --- cargo-0.44.1/vendor/unicode-width/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-width/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "unicode-width" -version = "0.1.7" +version = "0.1.8" authors = ["kwantam ", "Manish Goregaokar "] exclude = ["target/*", "Cargo.lock"] description = "Determine displayed width of `char` and `str` types\naccording to Unicode Standard Annex #11 rules.\n" diff -Nru cargo-0.44.1/vendor/unicode-width/.pc/drop-uninstallable-featuresets.patch/Cargo.toml cargo-0.47.0/vendor/unicode-width/.pc/drop-uninstallable-featuresets.patch/Cargo.toml --- cargo-0.44.1/vendor/unicode-width/.pc/drop-uninstallable-featuresets.patch/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-width/.pc/drop-uninstallable-featuresets.patch/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "unicode-width" -version = "0.1.7" +version = "0.1.8" authors = ["kwantam ", "Manish Goregaokar "] exclude = ["target/*", "Cargo.lock"] description = "Determine displayed width of `char` and `str` types\naccording to 
Unicode Standard Annex #11 rules.\n" diff -Nru cargo-0.44.1/vendor/unicode-width/scripts/unicode.py cargo-0.47.0/vendor/unicode-width/scripts/unicode.py --- cargo-0.44.1/vendor/unicode-width/scripts/unicode.py 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-width/scripts/unicode.py 2020-10-01 21:38:28.000000000 +0000 @@ -292,7 +292,7 @@ rf.write(""" /// The version of [Unicode](http://www.unicode.org/) /// that this version of unicode-width is based on. -pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s); +pub const UNICODE_VERSION: (u8, u8, u8) = (%s, %s, %s); """ % unicode_version) gencats = load_unicode_data("UnicodeData.txt") diff -Nru cargo-0.44.1/vendor/unicode-width/src/tables.rs cargo-0.47.0/vendor/unicode-width/src/tables.rs --- cargo-0.44.1/vendor/unicode-width/src/tables.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-width/src/tables.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,7 +14,7 @@ /// The version of [Unicode](http://www.unicode.org/) /// that this version of unicode-width is based on. -pub const UNICODE_VERSION: (u64, u64, u64) = (12, 1, 0); +pub const UNICODE_VERSION: (u8, u8, u8) = (13, 0, 0); pub mod charwidth { use core::option::Option::{self, Some, None}; @@ -90,194 +90,198 @@ ('\u{ac7}', '\u{ac8}', 0, 0), ('\u{acd}', '\u{acd}', 0, 0), ('\u{ae2}', '\u{ae3}', 0, 0), ('\u{afa}', '\u{aff}', 0, 0), ('\u{b01}', '\u{b01}', 0, 0), ('\u{b3c}', '\u{b3c}', 0, 0), ('\u{b3f}', '\u{b3f}', 0, 0), ('\u{b41}', '\u{b44}', 0, 0), ('\u{b4d}', '\u{b4d}', 0, 0), - ('\u{b56}', '\u{b56}', 0, 0), ('\u{b62}', '\u{b63}', 0, 0), ('\u{b82}', '\u{b82}', 0, 0), + ('\u{b55}', '\u{b56}', 0, 0), ('\u{b62}', '\u{b63}', 0, 0), ('\u{b82}', '\u{b82}', 0, 0), ('\u{bc0}', '\u{bc0}', 0, 0), ('\u{bcd}', '\u{bcd}', 0, 0), ('\u{c00}', '\u{c00}', 0, 0), ('\u{c04}', '\u{c04}', 0, 0), ('\u{c3e}', '\u{c40}', 0, 0), ('\u{c46}', '\u{c48}', 0, 0), ('\u{c4a}', '\u{c4d}', 0, 0), ('\u{c55}', '\u{c56}', 0, 0), ('\u{c62}', '\u{c63}', 0, 0), ('\u{c81}', '\u{c81}', 0, 0), ('\u{cbc}', '\u{cbc}', 0, 0), ('\u{cbf}', '\u{cbf}', 0, 0), ('\u{cc6}', '\u{cc6}', 0, 0), ('\u{ccc}', '\u{ccd}', 0, 0), ('\u{ce2}', '\u{ce3}', 0, 0), ('\u{d00}', '\u{d01}', 0, 0), ('\u{d3b}', '\u{d3c}', 0, 0), ('\u{d41}', '\u{d44}', 0, 0), - ('\u{d4d}', '\u{d4d}', 0, 0), ('\u{d62}', '\u{d63}', 0, 0), ('\u{dca}', '\u{dca}', 0, 0), - ('\u{dd2}', '\u{dd4}', 0, 0), ('\u{dd6}', '\u{dd6}', 0, 0), ('\u{e31}', '\u{e31}', 0, 0), - ('\u{e34}', '\u{e3a}', 0, 0), ('\u{e47}', '\u{e4e}', 0, 0), ('\u{eb1}', '\u{eb1}', 0, 0), - ('\u{eb4}', '\u{ebc}', 0, 0), ('\u{ec8}', '\u{ecd}', 0, 0), ('\u{f18}', '\u{f19}', 0, 0), - ('\u{f35}', '\u{f35}', 0, 0), ('\u{f37}', '\u{f37}', 0, 0), ('\u{f39}', '\u{f39}', 0, 0), - ('\u{f71}', '\u{f7e}', 0, 0), ('\u{f80}', '\u{f84}', 0, 0), ('\u{f86}', '\u{f87}', 0, 0), - ('\u{f8d}', '\u{f97}', 0, 0), ('\u{f99}', '\u{fbc}', 0, 0), ('\u{fc6}', '\u{fc6}', 0, 0), - ('\u{102d}', '\u{1030}', 0, 0), ('\u{1032}', '\u{1037}', 0, 0), ('\u{1039}', '\u{103a}', 0, - 0), ('\u{103d}', '\u{103e}', 0, 0), ('\u{1058}', '\u{1059}', 0, 0), ('\u{105e}', '\u{1060}', - 0, 0), ('\u{1071}', '\u{1074}', 0, 0), ('\u{1082}', '\u{1082}', 0, 0), ('\u{1085}', - '\u{1086}', 0, 0), ('\u{108d}', '\u{108d}', 0, 0), ('\u{109d}', '\u{109d}', 0, 0), - ('\u{1100}', '\u{115f}', 2, 2), ('\u{1160}', '\u{11ff}', 0, 0), ('\u{135d}', '\u{135f}', 0, - 0), ('\u{1712}', '\u{1714}', 0, 0), ('\u{1732}', '\u{1734}', 0, 0), ('\u{1752}', '\u{1753}', - 0, 0), ('\u{1772}', '\u{1773}', 0, 0), ('\u{17b4}', '\u{17b5}', 0, 0), ('\u{17b7}', - 
'\u{17bd}', 0, 0), ('\u{17c6}', '\u{17c6}', 0, 0), ('\u{17c9}', '\u{17d3}', 0, 0), - ('\u{17dd}', '\u{17dd}', 0, 0), ('\u{180b}', '\u{180e}', 0, 0), ('\u{1885}', '\u{1886}', 0, - 0), ('\u{18a9}', '\u{18a9}', 0, 0), ('\u{1920}', '\u{1922}', 0, 0), ('\u{1927}', '\u{1928}', - 0, 0), ('\u{1932}', '\u{1932}', 0, 0), ('\u{1939}', '\u{193b}', 0, 0), ('\u{1a17}', - '\u{1a18}', 0, 0), ('\u{1a1b}', '\u{1a1b}', 0, 0), ('\u{1a56}', '\u{1a56}', 0, 0), - ('\u{1a58}', '\u{1a5e}', 0, 0), ('\u{1a60}', '\u{1a60}', 0, 0), ('\u{1a62}', '\u{1a62}', 0, - 0), ('\u{1a65}', '\u{1a6c}', 0, 0), ('\u{1a73}', '\u{1a7c}', 0, 0), ('\u{1a7f}', '\u{1a7f}', - 0, 0), ('\u{1ab0}', '\u{1abe}', 0, 0), ('\u{1b00}', '\u{1b03}', 0, 0), ('\u{1b34}', - '\u{1b34}', 0, 0), ('\u{1b36}', '\u{1b3a}', 0, 0), ('\u{1b3c}', '\u{1b3c}', 0, 0), - ('\u{1b42}', '\u{1b42}', 0, 0), ('\u{1b6b}', '\u{1b73}', 0, 0), ('\u{1b80}', '\u{1b81}', 0, - 0), ('\u{1ba2}', '\u{1ba5}', 0, 0), ('\u{1ba8}', '\u{1ba9}', 0, 0), ('\u{1bab}', '\u{1bad}', - 0, 0), ('\u{1be6}', '\u{1be6}', 0, 0), ('\u{1be8}', '\u{1be9}', 0, 0), ('\u{1bed}', - '\u{1bed}', 0, 0), ('\u{1bef}', '\u{1bf1}', 0, 0), ('\u{1c2c}', '\u{1c33}', 0, 0), - ('\u{1c36}', '\u{1c37}', 0, 0), ('\u{1cd0}', '\u{1cd2}', 0, 0), ('\u{1cd4}', '\u{1ce0}', 0, - 0), ('\u{1ce2}', '\u{1ce8}', 0, 0), ('\u{1ced}', '\u{1ced}', 0, 0), ('\u{1cf4}', '\u{1cf4}', - 0, 0), ('\u{1cf8}', '\u{1cf9}', 0, 0), ('\u{1dc0}', '\u{1df9}', 0, 0), ('\u{1dfb}', - '\u{1dff}', 0, 0), ('\u{200b}', '\u{200f}', 0, 0), ('\u{2010}', '\u{2010}', 1, 2), - ('\u{2013}', '\u{2016}', 1, 2), ('\u{2018}', '\u{2019}', 1, 2), ('\u{201c}', '\u{201d}', 1, - 2), ('\u{2020}', '\u{2022}', 1, 2), ('\u{2024}', '\u{2027}', 1, 2), ('\u{202a}', '\u{202e}', - 0, 0), ('\u{2030}', '\u{2030}', 1, 2), ('\u{2032}', '\u{2033}', 1, 2), ('\u{2035}', - '\u{2035}', 1, 2), ('\u{203b}', '\u{203b}', 1, 2), ('\u{203e}', '\u{203e}', 1, 2), - ('\u{2060}', '\u{2064}', 0, 0), ('\u{2066}', '\u{206f}', 0, 0), ('\u{2074}', '\u{2074}', 1, - 2), ('\u{207f}', '\u{207f}', 1, 2), ('\u{2081}', '\u{2084}', 1, 2), ('\u{20ac}', '\u{20ac}', - 1, 2), ('\u{20d0}', '\u{20f0}', 0, 0), ('\u{2103}', '\u{2103}', 1, 2), ('\u{2105}', - '\u{2105}', 1, 2), ('\u{2109}', '\u{2109}', 1, 2), ('\u{2113}', '\u{2113}', 1, 2), - ('\u{2116}', '\u{2116}', 1, 2), ('\u{2121}', '\u{2122}', 1, 2), ('\u{2126}', '\u{2126}', 1, - 2), ('\u{212b}', '\u{212b}', 1, 2), ('\u{2153}', '\u{2154}', 1, 2), ('\u{215b}', '\u{215e}', - 1, 2), ('\u{2160}', '\u{216b}', 1, 2), ('\u{2170}', '\u{2179}', 1, 2), ('\u{2189}', - '\u{2189}', 1, 2), ('\u{2190}', '\u{2199}', 1, 2), ('\u{21b8}', '\u{21b9}', 1, 2), - ('\u{21d2}', '\u{21d2}', 1, 2), ('\u{21d4}', '\u{21d4}', 1, 2), ('\u{21e7}', '\u{21e7}', 1, - 2), ('\u{2200}', '\u{2200}', 1, 2), ('\u{2202}', '\u{2203}', 1, 2), ('\u{2207}', '\u{2208}', - 1, 2), ('\u{220b}', '\u{220b}', 1, 2), ('\u{220f}', '\u{220f}', 1, 2), ('\u{2211}', - '\u{2211}', 1, 2), ('\u{2215}', '\u{2215}', 1, 2), ('\u{221a}', '\u{221a}', 1, 2), - ('\u{221d}', '\u{2220}', 1, 2), ('\u{2223}', '\u{2223}', 1, 2), ('\u{2225}', '\u{2225}', 1, - 2), ('\u{2227}', '\u{222c}', 1, 2), ('\u{222e}', '\u{222e}', 1, 2), ('\u{2234}', '\u{2237}', - 1, 2), ('\u{223c}', '\u{223d}', 1, 2), ('\u{2248}', '\u{2248}', 1, 2), ('\u{224c}', - '\u{224c}', 1, 2), ('\u{2252}', '\u{2252}', 1, 2), ('\u{2260}', '\u{2261}', 1, 2), - ('\u{2264}', '\u{2267}', 1, 2), ('\u{226a}', '\u{226b}', 1, 2), ('\u{226e}', '\u{226f}', 1, - 2), ('\u{2282}', '\u{2283}', 1, 2), ('\u{2286}', '\u{2287}', 1, 2), ('\u{2295}', '\u{2295}', - 1, 2), ('\u{2299}', '\u{2299}', 1, 2), 
('\u{22a5}', '\u{22a5}', 1, 2), ('\u{22bf}', - '\u{22bf}', 1, 2), ('\u{2312}', '\u{2312}', 1, 2), ('\u{231a}', '\u{231b}', 2, 2), - ('\u{2329}', '\u{232a}', 2, 2), ('\u{23e9}', '\u{23ec}', 2, 2), ('\u{23f0}', '\u{23f0}', 2, - 2), ('\u{23f3}', '\u{23f3}', 2, 2), ('\u{2460}', '\u{24e9}', 1, 2), ('\u{24eb}', '\u{254b}', - 1, 2), ('\u{2550}', '\u{2573}', 1, 2), ('\u{2580}', '\u{258f}', 1, 2), ('\u{2592}', - '\u{2595}', 1, 2), ('\u{25a0}', '\u{25a1}', 1, 2), ('\u{25a3}', '\u{25a9}', 1, 2), - ('\u{25b2}', '\u{25b3}', 1, 2), ('\u{25b6}', '\u{25b7}', 1, 2), ('\u{25bc}', '\u{25bd}', 1, - 2), ('\u{25c0}', '\u{25c1}', 1, 2), ('\u{25c6}', '\u{25c8}', 1, 2), ('\u{25cb}', '\u{25cb}', - 1, 2), ('\u{25ce}', '\u{25d1}', 1, 2), ('\u{25e2}', '\u{25e5}', 1, 2), ('\u{25ef}', - '\u{25ef}', 1, 2), ('\u{25fd}', '\u{25fe}', 2, 2), ('\u{2605}', '\u{2606}', 1, 2), - ('\u{2609}', '\u{2609}', 1, 2), ('\u{260e}', '\u{260f}', 1, 2), ('\u{2614}', '\u{2615}', 2, - 2), ('\u{261c}', '\u{261c}', 1, 2), ('\u{261e}', '\u{261e}', 1, 2), ('\u{2640}', '\u{2640}', - 1, 2), ('\u{2642}', '\u{2642}', 1, 2), ('\u{2648}', '\u{2653}', 2, 2), ('\u{2660}', - '\u{2661}', 1, 2), ('\u{2663}', '\u{2665}', 1, 2), ('\u{2667}', '\u{266a}', 1, 2), - ('\u{266c}', '\u{266d}', 1, 2), ('\u{266f}', '\u{266f}', 1, 2), ('\u{267f}', '\u{267f}', 2, - 2), ('\u{2693}', '\u{2693}', 2, 2), ('\u{269e}', '\u{269f}', 1, 2), ('\u{26a1}', '\u{26a1}', - 2, 2), ('\u{26aa}', '\u{26ab}', 2, 2), ('\u{26bd}', '\u{26be}', 2, 2), ('\u{26bf}', - '\u{26bf}', 1, 2), ('\u{26c4}', '\u{26c5}', 2, 2), ('\u{26c6}', '\u{26cd}', 1, 2), - ('\u{26ce}', '\u{26ce}', 2, 2), ('\u{26cf}', '\u{26d3}', 1, 2), ('\u{26d4}', '\u{26d4}', 2, - 2), ('\u{26d5}', '\u{26e1}', 1, 2), ('\u{26e3}', '\u{26e3}', 1, 2), ('\u{26e8}', '\u{26e9}', - 1, 2), ('\u{26ea}', '\u{26ea}', 2, 2), ('\u{26eb}', '\u{26f1}', 1, 2), ('\u{26f2}', - '\u{26f3}', 2, 2), ('\u{26f4}', '\u{26f4}', 1, 2), ('\u{26f5}', '\u{26f5}', 2, 2), - ('\u{26f6}', '\u{26f9}', 1, 2), ('\u{26fa}', '\u{26fa}', 2, 2), ('\u{26fb}', '\u{26fc}', 1, - 2), ('\u{26fd}', '\u{26fd}', 2, 2), ('\u{26fe}', '\u{26ff}', 1, 2), ('\u{2705}', '\u{2705}', - 2, 2), ('\u{270a}', '\u{270b}', 2, 2), ('\u{2728}', '\u{2728}', 2, 2), ('\u{273d}', - '\u{273d}', 1, 2), ('\u{274c}', '\u{274c}', 2, 2), ('\u{274e}', '\u{274e}', 2, 2), - ('\u{2753}', '\u{2755}', 2, 2), ('\u{2757}', '\u{2757}', 2, 2), ('\u{2776}', '\u{277f}', 1, - 2), ('\u{2795}', '\u{2797}', 2, 2), ('\u{27b0}', '\u{27b0}', 2, 2), ('\u{27bf}', '\u{27bf}', - 2, 2), ('\u{2b1b}', '\u{2b1c}', 2, 2), ('\u{2b50}', '\u{2b50}', 2, 2), ('\u{2b55}', - '\u{2b55}', 2, 2), ('\u{2b56}', '\u{2b59}', 1, 2), ('\u{2cef}', '\u{2cf1}', 0, 0), - ('\u{2d7f}', '\u{2d7f}', 0, 0), ('\u{2de0}', '\u{2dff}', 0, 0), ('\u{2e80}', '\u{2e99}', 2, - 2), ('\u{2e9b}', '\u{2ef3}', 2, 2), ('\u{2f00}', '\u{2fd5}', 2, 2), ('\u{2ff0}', '\u{2ffb}', - 2, 2), ('\u{3000}', '\u{3029}', 2, 2), ('\u{302a}', '\u{302d}', 0, 0), ('\u{302e}', - '\u{303e}', 2, 2), ('\u{3041}', '\u{3096}', 2, 2), ('\u{3099}', '\u{309a}', 0, 0), - ('\u{309b}', '\u{30ff}', 2, 2), ('\u{3105}', '\u{312f}', 2, 2), ('\u{3131}', '\u{318e}', 2, - 2), ('\u{3190}', '\u{31ba}', 2, 2), ('\u{31c0}', '\u{31e3}', 2, 2), ('\u{31f0}', '\u{321e}', + ('\u{d4d}', '\u{d4d}', 0, 0), ('\u{d62}', '\u{d63}', 0, 0), ('\u{d81}', '\u{d81}', 0, 0), + ('\u{dca}', '\u{dca}', 0, 0), ('\u{dd2}', '\u{dd4}', 0, 0), ('\u{dd6}', '\u{dd6}', 0, 0), + ('\u{e31}', '\u{e31}', 0, 0), ('\u{e34}', '\u{e3a}', 0, 0), ('\u{e47}', '\u{e4e}', 0, 0), + ('\u{eb1}', '\u{eb1}', 0, 0), ('\u{eb4}', '\u{ebc}', 0, 0), ('\u{ec8}', 
'\u{ecd}', 0, 0), + ('\u{f18}', '\u{f19}', 0, 0), ('\u{f35}', '\u{f35}', 0, 0), ('\u{f37}', '\u{f37}', 0, 0), + ('\u{f39}', '\u{f39}', 0, 0), ('\u{f71}', '\u{f7e}', 0, 0), ('\u{f80}', '\u{f84}', 0, 0), + ('\u{f86}', '\u{f87}', 0, 0), ('\u{f8d}', '\u{f97}', 0, 0), ('\u{f99}', '\u{fbc}', 0, 0), + ('\u{fc6}', '\u{fc6}', 0, 0), ('\u{102d}', '\u{1030}', 0, 0), ('\u{1032}', '\u{1037}', 0, + 0), ('\u{1039}', '\u{103a}', 0, 0), ('\u{103d}', '\u{103e}', 0, 0), ('\u{1058}', '\u{1059}', + 0, 0), ('\u{105e}', '\u{1060}', 0, 0), ('\u{1071}', '\u{1074}', 0, 0), ('\u{1082}', + '\u{1082}', 0, 0), ('\u{1085}', '\u{1086}', 0, 0), ('\u{108d}', '\u{108d}', 0, 0), + ('\u{109d}', '\u{109d}', 0, 0), ('\u{1100}', '\u{115f}', 2, 2), ('\u{1160}', '\u{11ff}', 0, + 0), ('\u{135d}', '\u{135f}', 0, 0), ('\u{1712}', '\u{1714}', 0, 0), ('\u{1732}', '\u{1734}', + 0, 0), ('\u{1752}', '\u{1753}', 0, 0), ('\u{1772}', '\u{1773}', 0, 0), ('\u{17b4}', + '\u{17b5}', 0, 0), ('\u{17b7}', '\u{17bd}', 0, 0), ('\u{17c6}', '\u{17c6}', 0, 0), + ('\u{17c9}', '\u{17d3}', 0, 0), ('\u{17dd}', '\u{17dd}', 0, 0), ('\u{180b}', '\u{180e}', 0, + 0), ('\u{1885}', '\u{1886}', 0, 0), ('\u{18a9}', '\u{18a9}', 0, 0), ('\u{1920}', '\u{1922}', + 0, 0), ('\u{1927}', '\u{1928}', 0, 0), ('\u{1932}', '\u{1932}', 0, 0), ('\u{1939}', + '\u{193b}', 0, 0), ('\u{1a17}', '\u{1a18}', 0, 0), ('\u{1a1b}', '\u{1a1b}', 0, 0), + ('\u{1a56}', '\u{1a56}', 0, 0), ('\u{1a58}', '\u{1a5e}', 0, 0), ('\u{1a60}', '\u{1a60}', 0, + 0), ('\u{1a62}', '\u{1a62}', 0, 0), ('\u{1a65}', '\u{1a6c}', 0, 0), ('\u{1a73}', '\u{1a7c}', + 0, 0), ('\u{1a7f}', '\u{1a7f}', 0, 0), ('\u{1ab0}', '\u{1ac0}', 0, 0), ('\u{1b00}', + '\u{1b03}', 0, 0), ('\u{1b34}', '\u{1b34}', 0, 0), ('\u{1b36}', '\u{1b3a}', 0, 0), + ('\u{1b3c}', '\u{1b3c}', 0, 0), ('\u{1b42}', '\u{1b42}', 0, 0), ('\u{1b6b}', '\u{1b73}', 0, + 0), ('\u{1b80}', '\u{1b81}', 0, 0), ('\u{1ba2}', '\u{1ba5}', 0, 0), ('\u{1ba8}', '\u{1ba9}', + 0, 0), ('\u{1bab}', '\u{1bad}', 0, 0), ('\u{1be6}', '\u{1be6}', 0, 0), ('\u{1be8}', + '\u{1be9}', 0, 0), ('\u{1bed}', '\u{1bed}', 0, 0), ('\u{1bef}', '\u{1bf1}', 0, 0), + ('\u{1c2c}', '\u{1c33}', 0, 0), ('\u{1c36}', '\u{1c37}', 0, 0), ('\u{1cd0}', '\u{1cd2}', 0, + 0), ('\u{1cd4}', '\u{1ce0}', 0, 0), ('\u{1ce2}', '\u{1ce8}', 0, 0), ('\u{1ced}', '\u{1ced}', + 0, 0), ('\u{1cf4}', '\u{1cf4}', 0, 0), ('\u{1cf8}', '\u{1cf9}', 0, 0), ('\u{1dc0}', + '\u{1df9}', 0, 0), ('\u{1dfb}', '\u{1dff}', 0, 0), ('\u{200b}', '\u{200f}', 0, 0), + ('\u{2010}', '\u{2010}', 1, 2), ('\u{2013}', '\u{2016}', 1, 2), ('\u{2018}', '\u{2019}', 1, + 2), ('\u{201c}', '\u{201d}', 1, 2), ('\u{2020}', '\u{2022}', 1, 2), ('\u{2024}', '\u{2027}', + 1, 2), ('\u{202a}', '\u{202e}', 0, 0), ('\u{2030}', '\u{2030}', 1, 2), ('\u{2032}', + '\u{2033}', 1, 2), ('\u{2035}', '\u{2035}', 1, 2), ('\u{203b}', '\u{203b}', 1, 2), + ('\u{203e}', '\u{203e}', 1, 2), ('\u{2060}', '\u{2064}', 0, 0), ('\u{2066}', '\u{206f}', 0, + 0), ('\u{2074}', '\u{2074}', 1, 2), ('\u{207f}', '\u{207f}', 1, 2), ('\u{2081}', '\u{2084}', + 1, 2), ('\u{20ac}', '\u{20ac}', 1, 2), ('\u{20d0}', '\u{20f0}', 0, 0), ('\u{2103}', + '\u{2103}', 1, 2), ('\u{2105}', '\u{2105}', 1, 2), ('\u{2109}', '\u{2109}', 1, 2), + ('\u{2113}', '\u{2113}', 1, 2), ('\u{2116}', '\u{2116}', 1, 2), ('\u{2121}', '\u{2122}', 1, + 2), ('\u{2126}', '\u{2126}', 1, 2), ('\u{212b}', '\u{212b}', 1, 2), ('\u{2153}', '\u{2154}', + 1, 2), ('\u{215b}', '\u{215e}', 1, 2), ('\u{2160}', '\u{216b}', 1, 2), ('\u{2170}', + '\u{2179}', 1, 2), ('\u{2189}', '\u{2189}', 1, 2), ('\u{2190}', '\u{2199}', 1, 2), + ('\u{21b8}', 
'\u{21b9}', 1, 2), ('\u{21d2}', '\u{21d2}', 1, 2), ('\u{21d4}', '\u{21d4}', 1, + 2), ('\u{21e7}', '\u{21e7}', 1, 2), ('\u{2200}', '\u{2200}', 1, 2), ('\u{2202}', '\u{2203}', + 1, 2), ('\u{2207}', '\u{2208}', 1, 2), ('\u{220b}', '\u{220b}', 1, 2), ('\u{220f}', + '\u{220f}', 1, 2), ('\u{2211}', '\u{2211}', 1, 2), ('\u{2215}', '\u{2215}', 1, 2), + ('\u{221a}', '\u{221a}', 1, 2), ('\u{221d}', '\u{2220}', 1, 2), ('\u{2223}', '\u{2223}', 1, + 2), ('\u{2225}', '\u{2225}', 1, 2), ('\u{2227}', '\u{222c}', 1, 2), ('\u{222e}', '\u{222e}', + 1, 2), ('\u{2234}', '\u{2237}', 1, 2), ('\u{223c}', '\u{223d}', 1, 2), ('\u{2248}', + '\u{2248}', 1, 2), ('\u{224c}', '\u{224c}', 1, 2), ('\u{2252}', '\u{2252}', 1, 2), + ('\u{2260}', '\u{2261}', 1, 2), ('\u{2264}', '\u{2267}', 1, 2), ('\u{226a}', '\u{226b}', 1, + 2), ('\u{226e}', '\u{226f}', 1, 2), ('\u{2282}', '\u{2283}', 1, 2), ('\u{2286}', '\u{2287}', + 1, 2), ('\u{2295}', '\u{2295}', 1, 2), ('\u{2299}', '\u{2299}', 1, 2), ('\u{22a5}', + '\u{22a5}', 1, 2), ('\u{22bf}', '\u{22bf}', 1, 2), ('\u{2312}', '\u{2312}', 1, 2), + ('\u{231a}', '\u{231b}', 2, 2), ('\u{2329}', '\u{232a}', 2, 2), ('\u{23e9}', '\u{23ec}', 2, + 2), ('\u{23f0}', '\u{23f0}', 2, 2), ('\u{23f3}', '\u{23f3}', 2, 2), ('\u{2460}', '\u{24e9}', + 1, 2), ('\u{24eb}', '\u{254b}', 1, 2), ('\u{2550}', '\u{2573}', 1, 2), ('\u{2580}', + '\u{258f}', 1, 2), ('\u{2592}', '\u{2595}', 1, 2), ('\u{25a0}', '\u{25a1}', 1, 2), + ('\u{25a3}', '\u{25a9}', 1, 2), ('\u{25b2}', '\u{25b3}', 1, 2), ('\u{25b6}', '\u{25b7}', 1, + 2), ('\u{25bc}', '\u{25bd}', 1, 2), ('\u{25c0}', '\u{25c1}', 1, 2), ('\u{25c6}', '\u{25c8}', + 1, 2), ('\u{25cb}', '\u{25cb}', 1, 2), ('\u{25ce}', '\u{25d1}', 1, 2), ('\u{25e2}', + '\u{25e5}', 1, 2), ('\u{25ef}', '\u{25ef}', 1, 2), ('\u{25fd}', '\u{25fe}', 2, 2), + ('\u{2605}', '\u{2606}', 1, 2), ('\u{2609}', '\u{2609}', 1, 2), ('\u{260e}', '\u{260f}', 1, + 2), ('\u{2614}', '\u{2615}', 2, 2), ('\u{261c}', '\u{261c}', 1, 2), ('\u{261e}', '\u{261e}', + 1, 2), ('\u{2640}', '\u{2640}', 1, 2), ('\u{2642}', '\u{2642}', 1, 2), ('\u{2648}', + '\u{2653}', 2, 2), ('\u{2660}', '\u{2661}', 1, 2), ('\u{2663}', '\u{2665}', 1, 2), + ('\u{2667}', '\u{266a}', 1, 2), ('\u{266c}', '\u{266d}', 1, 2), ('\u{266f}', '\u{266f}', 1, + 2), ('\u{267f}', '\u{267f}', 2, 2), ('\u{2693}', '\u{2693}', 2, 2), ('\u{269e}', '\u{269f}', + 1, 2), ('\u{26a1}', '\u{26a1}', 2, 2), ('\u{26aa}', '\u{26ab}', 2, 2), ('\u{26bd}', + '\u{26be}', 2, 2), ('\u{26bf}', '\u{26bf}', 1, 2), ('\u{26c4}', '\u{26c5}', 2, 2), + ('\u{26c6}', '\u{26cd}', 1, 2), ('\u{26ce}', '\u{26ce}', 2, 2), ('\u{26cf}', '\u{26d3}', 1, + 2), ('\u{26d4}', '\u{26d4}', 2, 2), ('\u{26d5}', '\u{26e1}', 1, 2), ('\u{26e3}', '\u{26e3}', + 1, 2), ('\u{26e8}', '\u{26e9}', 1, 2), ('\u{26ea}', '\u{26ea}', 2, 2), ('\u{26eb}', + '\u{26f1}', 1, 2), ('\u{26f2}', '\u{26f3}', 2, 2), ('\u{26f4}', '\u{26f4}', 1, 2), + ('\u{26f5}', '\u{26f5}', 2, 2), ('\u{26f6}', '\u{26f9}', 1, 2), ('\u{26fa}', '\u{26fa}', 2, + 2), ('\u{26fb}', '\u{26fc}', 1, 2), ('\u{26fd}', '\u{26fd}', 2, 2), ('\u{26fe}', '\u{26ff}', + 1, 2), ('\u{2705}', '\u{2705}', 2, 2), ('\u{270a}', '\u{270b}', 2, 2), ('\u{2728}', + '\u{2728}', 2, 2), ('\u{273d}', '\u{273d}', 1, 2), ('\u{274c}', '\u{274c}', 2, 2), + ('\u{274e}', '\u{274e}', 2, 2), ('\u{2753}', '\u{2755}', 2, 2), ('\u{2757}', '\u{2757}', 2, + 2), ('\u{2776}', '\u{277f}', 1, 2), ('\u{2795}', '\u{2797}', 2, 2), ('\u{27b0}', '\u{27b0}', + 2, 2), ('\u{27bf}', '\u{27bf}', 2, 2), ('\u{2b1b}', '\u{2b1c}', 2, 2), ('\u{2b50}', + '\u{2b50}', 2, 2), ('\u{2b55}', '\u{2b55}', 2, 2), 
('\u{2b56}', '\u{2b59}', 1, 2), + ('\u{2cef}', '\u{2cf1}', 0, 0), ('\u{2d7f}', '\u{2d7f}', 0, 0), ('\u{2de0}', '\u{2dff}', 0, + 0), ('\u{2e80}', '\u{2e99}', 2, 2), ('\u{2e9b}', '\u{2ef3}', 2, 2), ('\u{2f00}', '\u{2fd5}', + 2, 2), ('\u{2ff0}', '\u{2ffb}', 2, 2), ('\u{3000}', '\u{3029}', 2, 2), ('\u{302a}', + '\u{302d}', 0, 0), ('\u{302e}', '\u{303e}', 2, 2), ('\u{3041}', '\u{3096}', 2, 2), + ('\u{3099}', '\u{309a}', 0, 0), ('\u{309b}', '\u{30ff}', 2, 2), ('\u{3105}', '\u{312f}', 2, + 2), ('\u{3131}', '\u{318e}', 2, 2), ('\u{3190}', '\u{31e3}', 2, 2), ('\u{31f0}', '\u{321e}', 2, 2), ('\u{3220}', '\u{3247}', 2, 2), ('\u{3248}', '\u{324f}', 1, 2), ('\u{3250}', '\u{4dbf}', 2, 2), ('\u{4e00}', '\u{a48c}', 2, 2), ('\u{a490}', '\u{a4c6}', 2, 2), ('\u{a66f}', '\u{a672}', 0, 0), ('\u{a674}', '\u{a67d}', 0, 0), ('\u{a69e}', '\u{a69f}', 0, 0), ('\u{a6f0}', '\u{a6f1}', 0, 0), ('\u{a802}', '\u{a802}', 0, 0), ('\u{a806}', '\u{a806}', - 0, 0), ('\u{a80b}', '\u{a80b}', 0, 0), ('\u{a825}', '\u{a826}', 0, 0), ('\u{a8c4}', - '\u{a8c5}', 0, 0), ('\u{a8e0}', '\u{a8f1}', 0, 0), ('\u{a8ff}', '\u{a8ff}', 0, 0), - ('\u{a926}', '\u{a92d}', 0, 0), ('\u{a947}', '\u{a951}', 0, 0), ('\u{a960}', '\u{a97c}', 2, - 2), ('\u{a980}', '\u{a982}', 0, 0), ('\u{a9b3}', '\u{a9b3}', 0, 0), ('\u{a9b6}', '\u{a9b9}', - 0, 0), ('\u{a9bc}', '\u{a9bd}', 0, 0), ('\u{a9e5}', '\u{a9e5}', 0, 0), ('\u{aa29}', - '\u{aa2e}', 0, 0), ('\u{aa31}', '\u{aa32}', 0, 0), ('\u{aa35}', '\u{aa36}', 0, 0), - ('\u{aa43}', '\u{aa43}', 0, 0), ('\u{aa4c}', '\u{aa4c}', 0, 0), ('\u{aa7c}', '\u{aa7c}', 0, - 0), ('\u{aab0}', '\u{aab0}', 0, 0), ('\u{aab2}', '\u{aab4}', 0, 0), ('\u{aab7}', '\u{aab8}', - 0, 0), ('\u{aabe}', '\u{aabf}', 0, 0), ('\u{aac1}', '\u{aac1}', 0, 0), ('\u{aaec}', - '\u{aaed}', 0, 0), ('\u{aaf6}', '\u{aaf6}', 0, 0), ('\u{abe5}', '\u{abe5}', 0, 0), - ('\u{abe8}', '\u{abe8}', 0, 0), ('\u{abed}', '\u{abed}', 0, 0), ('\u{ac00}', '\u{d7a3}', 2, - 2), ('\u{e000}', '\u{f8ff}', 1, 2), ('\u{f900}', '\u{faff}', 2, 2), ('\u{fb1e}', '\u{fb1e}', - 0, 0), ('\u{fe00}', '\u{fe0f}', 0, 0), ('\u{fe10}', '\u{fe19}', 2, 2), ('\u{fe20}', - '\u{fe2f}', 0, 0), ('\u{fe30}', '\u{fe52}', 2, 2), ('\u{fe54}', '\u{fe66}', 2, 2), - ('\u{fe68}', '\u{fe6b}', 2, 2), ('\u{feff}', '\u{feff}', 0, 0), ('\u{ff01}', '\u{ff60}', 2, - 2), ('\u{ffe0}', '\u{ffe6}', 2, 2), ('\u{fff9}', '\u{fffb}', 0, 0), ('\u{fffd}', '\u{fffd}', - 1, 2), ('\u{101fd}', '\u{101fd}', 0, 0), ('\u{102e0}', '\u{102e0}', 0, 0), ('\u{10376}', - '\u{1037a}', 0, 0), ('\u{10a01}', '\u{10a03}', 0, 0), ('\u{10a05}', '\u{10a06}', 0, 0), - ('\u{10a0c}', '\u{10a0f}', 0, 0), ('\u{10a38}', '\u{10a3a}', 0, 0), ('\u{10a3f}', - '\u{10a3f}', 0, 0), ('\u{10ae5}', '\u{10ae6}', 0, 0), ('\u{10d24}', '\u{10d27}', 0, 0), - ('\u{10f46}', '\u{10f50}', 0, 0), ('\u{11001}', '\u{11001}', 0, 0), ('\u{11038}', - '\u{11046}', 0, 0), ('\u{1107f}', '\u{11081}', 0, 0), ('\u{110b3}', '\u{110b6}', 0, 0), - ('\u{110b9}', '\u{110ba}', 0, 0), ('\u{110bd}', '\u{110bd}', 0, 0), ('\u{110cd}', - '\u{110cd}', 0, 0), ('\u{11100}', '\u{11102}', 0, 0), ('\u{11127}', '\u{1112b}', 0, 0), - ('\u{1112d}', '\u{11134}', 0, 0), ('\u{11173}', '\u{11173}', 0, 0), ('\u{11180}', - '\u{11181}', 0, 0), ('\u{111b6}', '\u{111be}', 0, 0), ('\u{111c9}', '\u{111cc}', 0, 0), - ('\u{1122f}', '\u{11231}', 0, 0), ('\u{11234}', '\u{11234}', 0, 0), ('\u{11236}', - '\u{11237}', 0, 0), ('\u{1123e}', '\u{1123e}', 0, 0), ('\u{112df}', '\u{112df}', 0, 0), - ('\u{112e3}', '\u{112ea}', 0, 0), ('\u{11300}', '\u{11301}', 0, 0), ('\u{1133b}', - '\u{1133c}', 0, 0), ('\u{11340}', 
'\u{11340}', 0, 0), ('\u{11366}', '\u{1136c}', 0, 0), - ('\u{11370}', '\u{11374}', 0, 0), ('\u{11438}', '\u{1143f}', 0, 0), ('\u{11442}', - '\u{11444}', 0, 0), ('\u{11446}', '\u{11446}', 0, 0), ('\u{1145e}', '\u{1145e}', 0, 0), - ('\u{114b3}', '\u{114b8}', 0, 0), ('\u{114ba}', '\u{114ba}', 0, 0), ('\u{114bf}', - '\u{114c0}', 0, 0), ('\u{114c2}', '\u{114c3}', 0, 0), ('\u{115b2}', '\u{115b5}', 0, 0), - ('\u{115bc}', '\u{115bd}', 0, 0), ('\u{115bf}', '\u{115c0}', 0, 0), ('\u{115dc}', - '\u{115dd}', 0, 0), ('\u{11633}', '\u{1163a}', 0, 0), ('\u{1163d}', '\u{1163d}', 0, 0), - ('\u{1163f}', '\u{11640}', 0, 0), ('\u{116ab}', '\u{116ab}', 0, 0), ('\u{116ad}', - '\u{116ad}', 0, 0), ('\u{116b0}', '\u{116b5}', 0, 0), ('\u{116b7}', '\u{116b7}', 0, 0), - ('\u{1171d}', '\u{1171f}', 0, 0), ('\u{11722}', '\u{11725}', 0, 0), ('\u{11727}', - '\u{1172b}', 0, 0), ('\u{1182f}', '\u{11837}', 0, 0), ('\u{11839}', '\u{1183a}', 0, 0), - ('\u{119d4}', '\u{119d7}', 0, 0), ('\u{119da}', '\u{119db}', 0, 0), ('\u{119e0}', - '\u{119e0}', 0, 0), ('\u{11a01}', '\u{11a0a}', 0, 0), ('\u{11a33}', '\u{11a38}', 0, 0), - ('\u{11a3b}', '\u{11a3e}', 0, 0), ('\u{11a47}', '\u{11a47}', 0, 0), ('\u{11a51}', - '\u{11a56}', 0, 0), ('\u{11a59}', '\u{11a5b}', 0, 0), ('\u{11a8a}', '\u{11a96}', 0, 0), - ('\u{11a98}', '\u{11a99}', 0, 0), ('\u{11c30}', '\u{11c36}', 0, 0), ('\u{11c38}', - '\u{11c3d}', 0, 0), ('\u{11c3f}', '\u{11c3f}', 0, 0), ('\u{11c92}', '\u{11ca7}', 0, 0), - ('\u{11caa}', '\u{11cb0}', 0, 0), ('\u{11cb2}', '\u{11cb3}', 0, 0), ('\u{11cb5}', - '\u{11cb6}', 0, 0), ('\u{11d31}', '\u{11d36}', 0, 0), ('\u{11d3a}', '\u{11d3a}', 0, 0), - ('\u{11d3c}', '\u{11d3d}', 0, 0), ('\u{11d3f}', '\u{11d45}', 0, 0), ('\u{11d47}', - '\u{11d47}', 0, 0), ('\u{11d90}', '\u{11d91}', 0, 0), ('\u{11d95}', '\u{11d95}', 0, 0), - ('\u{11d97}', '\u{11d97}', 0, 0), ('\u{11ef3}', '\u{11ef4}', 0, 0), ('\u{13430}', - '\u{13438}', 0, 0), ('\u{16af0}', '\u{16af4}', 0, 0), ('\u{16b30}', '\u{16b36}', 0, 0), - ('\u{16f4f}', '\u{16f4f}', 0, 0), ('\u{16f8f}', '\u{16f92}', 0, 0), ('\u{16fe0}', - '\u{16fe3}', 2, 2), ('\u{17000}', '\u{187f7}', 2, 2), ('\u{18800}', '\u{18af2}', 2, 2), - ('\u{1b000}', '\u{1b11e}', 2, 2), ('\u{1b150}', '\u{1b152}', 2, 2), ('\u{1b164}', - '\u{1b167}', 2, 2), ('\u{1b170}', '\u{1b2fb}', 2, 2), ('\u{1bc9d}', '\u{1bc9e}', 0, 0), - ('\u{1bca0}', '\u{1bca3}', 0, 0), ('\u{1d167}', '\u{1d169}', 0, 0), ('\u{1d173}', - '\u{1d182}', 0, 0), ('\u{1d185}', '\u{1d18b}', 0, 0), ('\u{1d1aa}', '\u{1d1ad}', 0, 0), - ('\u{1d242}', '\u{1d244}', 0, 0), ('\u{1da00}', '\u{1da36}', 0, 0), ('\u{1da3b}', - '\u{1da6c}', 0, 0), ('\u{1da75}', '\u{1da75}', 0, 0), ('\u{1da84}', '\u{1da84}', 0, 0), - ('\u{1da9b}', '\u{1da9f}', 0, 0), ('\u{1daa1}', '\u{1daaf}', 0, 0), ('\u{1e000}', - '\u{1e006}', 0, 0), ('\u{1e008}', '\u{1e018}', 0, 0), ('\u{1e01b}', '\u{1e021}', 0, 0), - ('\u{1e023}', '\u{1e024}', 0, 0), ('\u{1e026}', '\u{1e02a}', 0, 0), ('\u{1e130}', - '\u{1e136}', 0, 0), ('\u{1e2ec}', '\u{1e2ef}', 0, 0), ('\u{1e8d0}', '\u{1e8d6}', 0, 0), - ('\u{1e944}', '\u{1e94a}', 0, 0), ('\u{1f004}', '\u{1f004}', 2, 2), ('\u{1f0cf}', - '\u{1f0cf}', 2, 2), ('\u{1f100}', '\u{1f10a}', 1, 2), ('\u{1f110}', '\u{1f12d}', 1, 2), - ('\u{1f130}', '\u{1f169}', 1, 2), ('\u{1f170}', '\u{1f18d}', 1, 2), ('\u{1f18e}', - '\u{1f18e}', 2, 2), ('\u{1f18f}', '\u{1f190}', 1, 2), ('\u{1f191}', '\u{1f19a}', 2, 2), - ('\u{1f19b}', '\u{1f1ac}', 1, 2), ('\u{1f200}', '\u{1f202}', 2, 2), ('\u{1f210}', - '\u{1f23b}', 2, 2), ('\u{1f240}', '\u{1f248}', 2, 2), ('\u{1f250}', '\u{1f251}', 2, 2), - ('\u{1f260}', 
'\u{1f265}', 2, 2), ('\u{1f300}', '\u{1f320}', 2, 2), ('\u{1f32d}', - '\u{1f335}', 2, 2), ('\u{1f337}', '\u{1f37c}', 2, 2), ('\u{1f37e}', '\u{1f393}', 2, 2), - ('\u{1f3a0}', '\u{1f3ca}', 2, 2), ('\u{1f3cf}', '\u{1f3d3}', 2, 2), ('\u{1f3e0}', - '\u{1f3f0}', 2, 2), ('\u{1f3f4}', '\u{1f3f4}', 2, 2), ('\u{1f3f8}', '\u{1f43e}', 2, 2), - ('\u{1f440}', '\u{1f440}', 2, 2), ('\u{1f442}', '\u{1f4fc}', 2, 2), ('\u{1f4ff}', - '\u{1f53d}', 2, 2), ('\u{1f54b}', '\u{1f54e}', 2, 2), ('\u{1f550}', '\u{1f567}', 2, 2), - ('\u{1f57a}', '\u{1f57a}', 2, 2), ('\u{1f595}', '\u{1f596}', 2, 2), ('\u{1f5a4}', - '\u{1f5a4}', 2, 2), ('\u{1f5fb}', '\u{1f64f}', 2, 2), ('\u{1f680}', '\u{1f6c5}', 2, 2), - ('\u{1f6cc}', '\u{1f6cc}', 2, 2), ('\u{1f6d0}', '\u{1f6d2}', 2, 2), ('\u{1f6d5}', - '\u{1f6d5}', 2, 2), ('\u{1f6eb}', '\u{1f6ec}', 2, 2), ('\u{1f6f4}', '\u{1f6fa}', 2, 2), - ('\u{1f7e0}', '\u{1f7eb}', 2, 2), ('\u{1f90d}', '\u{1f971}', 2, 2), ('\u{1f973}', - '\u{1f976}', 2, 2), ('\u{1f97a}', '\u{1f9a2}', 2, 2), ('\u{1f9a5}', '\u{1f9aa}', 2, 2), - ('\u{1f9ae}', '\u{1f9ca}', 2, 2), ('\u{1f9cd}', '\u{1f9ff}', 2, 2), ('\u{1fa70}', - '\u{1fa73}', 2, 2), ('\u{1fa78}', '\u{1fa7a}', 2, 2), ('\u{1fa80}', '\u{1fa82}', 2, 2), - ('\u{1fa90}', '\u{1fa95}', 2, 2), ('\u{20000}', '\u{2fffd}', 2, 2), ('\u{30000}', - '\u{3fffd}', 2, 2), ('\u{e0001}', '\u{e0001}', 0, 0), ('\u{e0020}', '\u{e007f}', 0, 0), - ('\u{e0100}', '\u{e01ef}', 0, 0), ('\u{f0000}', '\u{ffffd}', 1, 2), ('\u{100000}', - '\u{10fffd}', 1, 2) + 0, 0), ('\u{a80b}', '\u{a80b}', 0, 0), ('\u{a825}', '\u{a826}', 0, 0), ('\u{a82c}', + '\u{a82c}', 0, 0), ('\u{a8c4}', '\u{a8c5}', 0, 0), ('\u{a8e0}', '\u{a8f1}', 0, 0), + ('\u{a8ff}', '\u{a8ff}', 0, 0), ('\u{a926}', '\u{a92d}', 0, 0), ('\u{a947}', '\u{a951}', 0, + 0), ('\u{a960}', '\u{a97c}', 2, 2), ('\u{a980}', '\u{a982}', 0, 0), ('\u{a9b3}', '\u{a9b3}', + 0, 0), ('\u{a9b6}', '\u{a9b9}', 0, 0), ('\u{a9bc}', '\u{a9bd}', 0, 0), ('\u{a9e5}', + '\u{a9e5}', 0, 0), ('\u{aa29}', '\u{aa2e}', 0, 0), ('\u{aa31}', '\u{aa32}', 0, 0), + ('\u{aa35}', '\u{aa36}', 0, 0), ('\u{aa43}', '\u{aa43}', 0, 0), ('\u{aa4c}', '\u{aa4c}', 0, + 0), ('\u{aa7c}', '\u{aa7c}', 0, 0), ('\u{aab0}', '\u{aab0}', 0, 0), ('\u{aab2}', '\u{aab4}', + 0, 0), ('\u{aab7}', '\u{aab8}', 0, 0), ('\u{aabe}', '\u{aabf}', 0, 0), ('\u{aac1}', + '\u{aac1}', 0, 0), ('\u{aaec}', '\u{aaed}', 0, 0), ('\u{aaf6}', '\u{aaf6}', 0, 0), + ('\u{abe5}', '\u{abe5}', 0, 0), ('\u{abe8}', '\u{abe8}', 0, 0), ('\u{abed}', '\u{abed}', 0, + 0), ('\u{ac00}', '\u{d7a3}', 2, 2), ('\u{e000}', '\u{f8ff}', 1, 2), ('\u{f900}', '\u{faff}', + 2, 2), ('\u{fb1e}', '\u{fb1e}', 0, 0), ('\u{fe00}', '\u{fe0f}', 0, 0), ('\u{fe10}', + '\u{fe19}', 2, 2), ('\u{fe20}', '\u{fe2f}', 0, 0), ('\u{fe30}', '\u{fe52}', 2, 2), + ('\u{fe54}', '\u{fe66}', 2, 2), ('\u{fe68}', '\u{fe6b}', 2, 2), ('\u{feff}', '\u{feff}', 0, + 0), ('\u{ff01}', '\u{ff60}', 2, 2), ('\u{ffe0}', '\u{ffe6}', 2, 2), ('\u{fff9}', '\u{fffb}', + 0, 0), ('\u{fffd}', '\u{fffd}', 1, 2), ('\u{101fd}', '\u{101fd}', 0, 0), ('\u{102e0}', + '\u{102e0}', 0, 0), ('\u{10376}', '\u{1037a}', 0, 0), ('\u{10a01}', '\u{10a03}', 0, 0), + ('\u{10a05}', '\u{10a06}', 0, 0), ('\u{10a0c}', '\u{10a0f}', 0, 0), ('\u{10a38}', + '\u{10a3a}', 0, 0), ('\u{10a3f}', '\u{10a3f}', 0, 0), ('\u{10ae5}', '\u{10ae6}', 0, 0), + ('\u{10d24}', '\u{10d27}', 0, 0), ('\u{10eab}', '\u{10eac}', 0, 0), ('\u{10f46}', + '\u{10f50}', 0, 0), ('\u{11001}', '\u{11001}', 0, 0), ('\u{11038}', '\u{11046}', 0, 0), + ('\u{1107f}', '\u{11081}', 0, 0), ('\u{110b3}', '\u{110b6}', 0, 0), ('\u{110b9}', + '\u{110ba}', 0, 
0), ('\u{110bd}', '\u{110bd}', 0, 0), ('\u{110cd}', '\u{110cd}', 0, 0), + ('\u{11100}', '\u{11102}', 0, 0), ('\u{11127}', '\u{1112b}', 0, 0), ('\u{1112d}', + '\u{11134}', 0, 0), ('\u{11173}', '\u{11173}', 0, 0), ('\u{11180}', '\u{11181}', 0, 0), + ('\u{111b6}', '\u{111be}', 0, 0), ('\u{111c9}', '\u{111cc}', 0, 0), ('\u{111cf}', + '\u{111cf}', 0, 0), ('\u{1122f}', '\u{11231}', 0, 0), ('\u{11234}', '\u{11234}', 0, 0), + ('\u{11236}', '\u{11237}', 0, 0), ('\u{1123e}', '\u{1123e}', 0, 0), ('\u{112df}', + '\u{112df}', 0, 0), ('\u{112e3}', '\u{112ea}', 0, 0), ('\u{11300}', '\u{11301}', 0, 0), + ('\u{1133b}', '\u{1133c}', 0, 0), ('\u{11340}', '\u{11340}', 0, 0), ('\u{11366}', + '\u{1136c}', 0, 0), ('\u{11370}', '\u{11374}', 0, 0), ('\u{11438}', '\u{1143f}', 0, 0), + ('\u{11442}', '\u{11444}', 0, 0), ('\u{11446}', '\u{11446}', 0, 0), ('\u{1145e}', + '\u{1145e}', 0, 0), ('\u{114b3}', '\u{114b8}', 0, 0), ('\u{114ba}', '\u{114ba}', 0, 0), + ('\u{114bf}', '\u{114c0}', 0, 0), ('\u{114c2}', '\u{114c3}', 0, 0), ('\u{115b2}', + '\u{115b5}', 0, 0), ('\u{115bc}', '\u{115bd}', 0, 0), ('\u{115bf}', '\u{115c0}', 0, 0), + ('\u{115dc}', '\u{115dd}', 0, 0), ('\u{11633}', '\u{1163a}', 0, 0), ('\u{1163d}', + '\u{1163d}', 0, 0), ('\u{1163f}', '\u{11640}', 0, 0), ('\u{116ab}', '\u{116ab}', 0, 0), + ('\u{116ad}', '\u{116ad}', 0, 0), ('\u{116b0}', '\u{116b5}', 0, 0), ('\u{116b7}', + '\u{116b7}', 0, 0), ('\u{1171d}', '\u{1171f}', 0, 0), ('\u{11722}', '\u{11725}', 0, 0), + ('\u{11727}', '\u{1172b}', 0, 0), ('\u{1182f}', '\u{11837}', 0, 0), ('\u{11839}', + '\u{1183a}', 0, 0), ('\u{1193b}', '\u{1193c}', 0, 0), ('\u{1193e}', '\u{1193e}', 0, 0), + ('\u{11943}', '\u{11943}', 0, 0), ('\u{119d4}', '\u{119d7}', 0, 0), ('\u{119da}', + '\u{119db}', 0, 0), ('\u{119e0}', '\u{119e0}', 0, 0), ('\u{11a01}', '\u{11a0a}', 0, 0), + ('\u{11a33}', '\u{11a38}', 0, 0), ('\u{11a3b}', '\u{11a3e}', 0, 0), ('\u{11a47}', + '\u{11a47}', 0, 0), ('\u{11a51}', '\u{11a56}', 0, 0), ('\u{11a59}', '\u{11a5b}', 0, 0), + ('\u{11a8a}', '\u{11a96}', 0, 0), ('\u{11a98}', '\u{11a99}', 0, 0), ('\u{11c30}', + '\u{11c36}', 0, 0), ('\u{11c38}', '\u{11c3d}', 0, 0), ('\u{11c3f}', '\u{11c3f}', 0, 0), + ('\u{11c92}', '\u{11ca7}', 0, 0), ('\u{11caa}', '\u{11cb0}', 0, 0), ('\u{11cb2}', + '\u{11cb3}', 0, 0), ('\u{11cb5}', '\u{11cb6}', 0, 0), ('\u{11d31}', '\u{11d36}', 0, 0), + ('\u{11d3a}', '\u{11d3a}', 0, 0), ('\u{11d3c}', '\u{11d3d}', 0, 0), ('\u{11d3f}', + '\u{11d45}', 0, 0), ('\u{11d47}', '\u{11d47}', 0, 0), ('\u{11d90}', '\u{11d91}', 0, 0), + ('\u{11d95}', '\u{11d95}', 0, 0), ('\u{11d97}', '\u{11d97}', 0, 0), ('\u{11ef3}', + '\u{11ef4}', 0, 0), ('\u{13430}', '\u{13438}', 0, 0), ('\u{16af0}', '\u{16af4}', 0, 0), + ('\u{16b30}', '\u{16b36}', 0, 0), ('\u{16f4f}', '\u{16f4f}', 0, 0), ('\u{16f8f}', + '\u{16f92}', 0, 0), ('\u{16fe0}', '\u{16fe3}', 2, 2), ('\u{16fe4}', '\u{16fe4}', 0, 0), + ('\u{16ff0}', '\u{16ff1}', 2, 2), ('\u{17000}', '\u{187f7}', 2, 2), ('\u{18800}', + '\u{18cd5}', 2, 2), ('\u{18d00}', '\u{18d08}', 2, 2), ('\u{1b000}', '\u{1b11e}', 2, 2), + ('\u{1b150}', '\u{1b152}', 2, 2), ('\u{1b164}', '\u{1b167}', 2, 2), ('\u{1b170}', + '\u{1b2fb}', 2, 2), ('\u{1bc9d}', '\u{1bc9e}', 0, 0), ('\u{1bca0}', '\u{1bca3}', 0, 0), + ('\u{1d167}', '\u{1d169}', 0, 0), ('\u{1d173}', '\u{1d182}', 0, 0), ('\u{1d185}', + '\u{1d18b}', 0, 0), ('\u{1d1aa}', '\u{1d1ad}', 0, 0), ('\u{1d242}', '\u{1d244}', 0, 0), + ('\u{1da00}', '\u{1da36}', 0, 0), ('\u{1da3b}', '\u{1da6c}', 0, 0), ('\u{1da75}', + '\u{1da75}', 0, 0), ('\u{1da84}', '\u{1da84}', 0, 0), ('\u{1da9b}', '\u{1da9f}', 0, 0), + 
('\u{1daa1}', '\u{1daaf}', 0, 0), ('\u{1e000}', '\u{1e006}', 0, 0), ('\u{1e008}', + '\u{1e018}', 0, 0), ('\u{1e01b}', '\u{1e021}', 0, 0), ('\u{1e023}', '\u{1e024}', 0, 0), + ('\u{1e026}', '\u{1e02a}', 0, 0), ('\u{1e130}', '\u{1e136}', 0, 0), ('\u{1e2ec}', + '\u{1e2ef}', 0, 0), ('\u{1e8d0}', '\u{1e8d6}', 0, 0), ('\u{1e944}', '\u{1e94a}', 0, 0), + ('\u{1f004}', '\u{1f004}', 2, 2), ('\u{1f0cf}', '\u{1f0cf}', 2, 2), ('\u{1f100}', + '\u{1f10a}', 1, 2), ('\u{1f110}', '\u{1f12d}', 1, 2), ('\u{1f130}', '\u{1f169}', 1, 2), + ('\u{1f170}', '\u{1f18d}', 1, 2), ('\u{1f18e}', '\u{1f18e}', 2, 2), ('\u{1f18f}', + '\u{1f190}', 1, 2), ('\u{1f191}', '\u{1f19a}', 2, 2), ('\u{1f19b}', '\u{1f1ac}', 1, 2), + ('\u{1f200}', '\u{1f202}', 2, 2), ('\u{1f210}', '\u{1f23b}', 2, 2), ('\u{1f240}', + '\u{1f248}', 2, 2), ('\u{1f250}', '\u{1f251}', 2, 2), ('\u{1f260}', '\u{1f265}', 2, 2), + ('\u{1f300}', '\u{1f320}', 2, 2), ('\u{1f32d}', '\u{1f335}', 2, 2), ('\u{1f337}', + '\u{1f37c}', 2, 2), ('\u{1f37e}', '\u{1f393}', 2, 2), ('\u{1f3a0}', '\u{1f3ca}', 2, 2), + ('\u{1f3cf}', '\u{1f3d3}', 2, 2), ('\u{1f3e0}', '\u{1f3f0}', 2, 2), ('\u{1f3f4}', + '\u{1f3f4}', 2, 2), ('\u{1f3f8}', '\u{1f43e}', 2, 2), ('\u{1f440}', '\u{1f440}', 2, 2), + ('\u{1f442}', '\u{1f4fc}', 2, 2), ('\u{1f4ff}', '\u{1f53d}', 2, 2), ('\u{1f54b}', + '\u{1f54e}', 2, 2), ('\u{1f550}', '\u{1f567}', 2, 2), ('\u{1f57a}', '\u{1f57a}', 2, 2), + ('\u{1f595}', '\u{1f596}', 2, 2), ('\u{1f5a4}', '\u{1f5a4}', 2, 2), ('\u{1f5fb}', + '\u{1f64f}', 2, 2), ('\u{1f680}', '\u{1f6c5}', 2, 2), ('\u{1f6cc}', '\u{1f6cc}', 2, 2), + ('\u{1f6d0}', '\u{1f6d2}', 2, 2), ('\u{1f6d5}', '\u{1f6d7}', 2, 2), ('\u{1f6eb}', + '\u{1f6ec}', 2, 2), ('\u{1f6f4}', '\u{1f6fc}', 2, 2), ('\u{1f7e0}', '\u{1f7eb}', 2, 2), + ('\u{1f90c}', '\u{1f93a}', 2, 2), ('\u{1f93c}', '\u{1f945}', 2, 2), ('\u{1f947}', + '\u{1f978}', 2, 2), ('\u{1f97a}', '\u{1f9cb}', 2, 2), ('\u{1f9cd}', '\u{1f9ff}', 2, 2), + ('\u{1fa70}', '\u{1fa74}', 2, 2), ('\u{1fa78}', '\u{1fa7a}', 2, 2), ('\u{1fa80}', + '\u{1fa86}', 2, 2), ('\u{1fa90}', '\u{1faa8}', 2, 2), ('\u{1fab0}', '\u{1fab6}', 2, 2), + ('\u{1fac0}', '\u{1fac2}', 2, 2), ('\u{1fad0}', '\u{1fad6}', 2, 2), ('\u{20000}', + '\u{2fffd}', 2, 2), ('\u{30000}', '\u{3fffd}', 2, 2), ('\u{e0001}', '\u{e0001}', 0, 0), + ('\u{e0020}', '\u{e007f}', 0, 0), ('\u{e0100}', '\u{e01ef}', 0, 0), ('\u{f0000}', + '\u{ffffd}', 1, 2), ('\u{100000}', '\u{10fffd}', 1, 2) ]; } diff -Nru cargo-0.44.1/vendor/unicode-xid/.cargo-checksum.json cargo-0.47.0/vendor/unicode-xid/.cargo-checksum.json --- cargo-0.44.1/vendor/unicode-xid/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"} \ No newline at end of file +{"files":{},"package":"f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/unicode-xid/Cargo.toml cargo-0.47.0/vendor/unicode-xid/Cargo.toml --- cargo-0.44.1/vendor/unicode-xid/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,7 +12,7 @@ [package] name = "unicode-xid" -version = "0.2.0" +version = "0.2.1" authors = ["erick.tryzelaar ", "kwantam "] exclude = ["/scripts/*", "/.travis.yml"] description = "Determine whether characters have the XID_Start\nor XID_Continue properties according to\nUnicode Standard Annex #31.\n" diff -Nru 
cargo-0.44.1/vendor/unicode-xid/README.md cargo-0.47.0/vendor/unicode-xid/README.md --- cargo-0.44.1/vendor/unicode-xid/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -23,15 +23,6 @@ unicode-xid supports a `no_std` feature. This eliminates dependence on std, and instead uses equivalent functions from core. -# crates.io - -You can use this package in your project by adding the following -to your `Cargo.toml`: - -```toml -[dependencies] -unicode-xid = "0.1.0" -``` # changelog diff -Nru cargo-0.44.1/vendor/unicode-xid/src/lib.rs cargo-0.47.0/vendor/unicode-xid/src/lib.rs --- cargo-0.44.1/vendor/unicode-xid/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -27,20 +27,13 @@ //! unicode-xid supports a `no_std` feature. This eliminates dependence //! on std, and instead uses equivalent functions from core. //! -//! # crates.io -//! -//! You can use this package in your project by adding the following -//! to your `Cargo.toml`: -//! -//! ```toml -//! [dependencies] -//! unicode-xid = "0.0.4" -//! ``` - -#![deny(missing_docs, unsafe_code)] -#![doc(html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png", - html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png")] +#![forbid(unsafe_code)] +#![deny(missing_docs)] +#![doc( + html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png", + html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png" +)] #![no_std] #![cfg_attr(feature = "bench", feature(test, unicode_internals))] @@ -80,8 +73,12 @@ impl UnicodeXID for char { #[inline] - fn is_xid_start(self) -> bool { derived_property::XID_Start(self) } + fn is_xid_start(self) -> bool { + derived_property::XID_Start(self) + } #[inline] - fn is_xid_continue(self) -> bool { derived_property::XID_Continue(self) } + fn is_xid_continue(self) -> bool { + derived_property::XID_Continue(self) + } } diff -Nru cargo-0.44.1/vendor/unicode-xid/src/tables.rs cargo-0.47.0/vendor/unicode-xid/src/tables.rs --- cargo-0.44.1/vendor/unicode-xid/src/tables.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/src/tables.rs 2020-10-01 21:38:28.000000000 +0000 @@ -14,239 +14,767 @@ /// The version of [Unicode](http://www.unicode.org/) /// that this version of unicode-xid is based on. -pub const UNICODE_VERSION: (u64, u64, u64) = (12, 1, 0); +pub const UNICODE_VERSION: (u64, u64, u64) = (13, 0, 0); -fn bsearch_range_table(c: char, r: &[(char,char)]) -> bool { - use core::cmp::Ordering::{Equal, Less, Greater}; +fn bsearch_range_table(c: char, r: &[(char, char)]) -> bool { + use core::cmp::Ordering::{Equal, Greater, Less}; - r.binary_search_by(|&(lo,hi)| { - if lo <= c && c <= hi { Equal } - else if hi < c { Less } - else { Greater } - }).is_ok() + r.binary_search_by(|&(lo, hi)| { + // Because ASCII ranges are at the start of the tables, a search for an + // ASCII char will involve more `Greater` results (i.e. the `(lo,hi)` + // table entry is greater than `c`) than `Less` results. And given that + // ASCII chars are so common, it makes sense to favor them. Therefore, + // the `Greater` case is tested for before the `Less` case. 
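For the `UnicodeXID` trait above, the typical use is UAX #31 identifier validation: the first character must have XID_Start and the rest XID_Continue. A minimal sketch, assuming `unicode-xid` is available as a dependency:

```rust
use unicode_xid::UnicodeXID;

/// Returns true if `s` is an identifier in the UAX #31 sense:
/// an XID_Start character followed by zero or more XID_Continue characters.
fn is_identifier(s: &str) -> bool {
    let mut chars = s.chars();
    match chars.next() {
        Some(first) => first.is_xid_start() && chars.all(|c| c.is_xid_continue()),
        None => false,
    }
}

fn main() {
    assert!(is_identifier("löwe"));
    assert!(!is_identifier("1abc")); // digits may continue an identifier but not start one
}
```

Note that `_` has XID_Continue but not XID_Start, which is why languages that allow a leading underscore (Rust included) special-case it on top of these properties.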
+ if lo > c { + Greater + } else if hi < c { + Less + } else { + Equal + } + }) + .is_ok() } pub mod derived_property { pub const XID_Continue_table: &[(char, char)] = &[ - ('\u{30}', '\u{39}'), ('\u{41}', '\u{5a}'), ('\u{5f}', '\u{5f}'), ('\u{61}', '\u{7a}'), - ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), ('\u{b7}', '\u{b7}'), ('\u{ba}', '\u{ba}'), - ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), ('\u{2c6}', '\u{2d1}'), - ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', '\u{2ee}'), ('\u{300}', - '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), ('\u{37f}', '\u{37f}'), - ('\u{386}', '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', - '\u{3f5}'), ('\u{3f7}', '\u{481}'), ('\u{483}', '\u{487}'), ('\u{48a}', '\u{52f}'), - ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{560}', '\u{588}'), ('\u{591}', - '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', '\u{5ea}'), ('\u{5ef}', '\u{5f2}'), ('\u{610}', - '\u{61a}'), ('\u{620}', '\u{669}'), ('\u{66e}', '\u{6d3}'), ('\u{6d5}', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), ('\u{6ea}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), ('\u{710}', - '\u{74a}'), ('\u{74d}', '\u{7b1}'), ('\u{7c0}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), - ('\u{7fd}', '\u{7fd}'), ('\u{800}', '\u{82d}'), ('\u{840}', '\u{85b}'), ('\u{860}', - '\u{86a}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}', '\u{8bd}'), ('\u{8d3}', '\u{8e1}'), - ('\u{8e3}', '\u{963}'), ('\u{966}', '\u{96f}'), ('\u{971}', '\u{983}'), ('\u{985}', - '\u{98c}'), ('\u{98f}', '\u{990}'), ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), - ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), ('\u{9bc}', '\u{9c4}'), ('\u{9c7}', - '\u{9c8}'), ('\u{9cb}', '\u{9ce}'), ('\u{9d7}', '\u{9d7}'), ('\u{9dc}', '\u{9dd}'), - ('\u{9df}', '\u{9e3}'), ('\u{9e6}', '\u{9f1}'), ('\u{9fc}', '\u{9fc}'), ('\u{9fe}', - '\u{9fe}'), ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'), - ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', - '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a3c}', '\u{a3c}'), ('\u{a3e}', '\u{a42}'), - ('\u{a47}', '\u{a48}'), ('\u{a4b}', '\u{a4d}'), ('\u{a51}', '\u{a51}'), ('\u{a59}', - '\u{a5c}'), ('\u{a5e}', '\u{a5e}'), ('\u{a66}', '\u{a75}'), ('\u{a81}', '\u{a83}'), - ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}', - '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', '\u{ac9}'), ('\u{acb}', '\u{acd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', - '\u{ae3}'), ('\u{ae6}', '\u{aef}'), ('\u{af9}', '\u{aff}'), ('\u{b01}', '\u{b03}'), - ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}', - '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3c}', '\u{b44}'), - ('\u{b47}', '\u{b48}'), ('\u{b4b}', '\u{b4d}'), ('\u{b56}', '\u{b57}'), ('\u{b5c}', - '\u{b5d}'), ('\u{b5f}', '\u{b63}'), ('\u{b66}', '\u{b6f}'), ('\u{b71}', '\u{b71}'), - ('\u{b82}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', - '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), - ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bbe}', - '\u{bc2}'), ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcd}'), ('\u{bd0}', '\u{bd0}'), - ('\u{bd7}', '\u{bd7}'), ('\u{be6}', '\u{bef}'), ('\u{c00}', '\u{c0c}'), ('\u{c0e}', - '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}', '\u{c44}'), - ('\u{c46}', 
'\u{c48}'), ('\u{c4a}', '\u{c4d}'), ('\u{c55}', '\u{c56}'), ('\u{c58}', - '\u{c5a}'), ('\u{c60}', '\u{c63}'), ('\u{c66}', '\u{c6f}'), ('\u{c80}', '\u{c83}'), - ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', - '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbc}', '\u{cc4}'), ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), ('\u{cd5}', '\u{cd6}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}', - '\u{ce3}'), ('\u{ce6}', '\u{cef}'), ('\u{cf1}', '\u{cf2}'), ('\u{d00}', '\u{d03}'), - ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d44}'), ('\u{d46}', - '\u{d48}'), ('\u{d4a}', '\u{d4e}'), ('\u{d54}', '\u{d57}'), ('\u{d5f}', '\u{d63}'), - ('\u{d66}', '\u{d6f}'), ('\u{d7a}', '\u{d7f}'), ('\u{d82}', '\u{d83}'), ('\u{d85}', - '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), - ('\u{dc0}', '\u{dc6}'), ('\u{dca}', '\u{dca}'), ('\u{dcf}', '\u{dd4}'), ('\u{dd6}', - '\u{dd6}'), ('\u{dd8}', '\u{ddf}'), ('\u{de6}', '\u{def}'), ('\u{df2}', '\u{df3}'), - ('\u{e01}', '\u{e3a}'), ('\u{e40}', '\u{e4e}'), ('\u{e50}', '\u{e59}'), ('\u{e81}', - '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e86}', '\u{e8a}'), ('\u{e8c}', '\u{ea3}'), - ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', - '\u{ec6}'), ('\u{ec8}', '\u{ecd}'), ('\u{ed0}', '\u{ed9}'), ('\u{edc}', '\u{edf}'), - ('\u{f00}', '\u{f00}'), ('\u{f18}', '\u{f19}'), ('\u{f20}', '\u{f29}'), ('\u{f35}', - '\u{f35}'), ('\u{f37}', '\u{f37}'), ('\u{f39}', '\u{f39}'), ('\u{f3e}', '\u{f47}'), - ('\u{f49}', '\u{f6c}'), ('\u{f71}', '\u{f84}'), ('\u{f86}', '\u{f97}'), ('\u{f99}', - '\u{fbc}'), ('\u{fc6}', '\u{fc6}'), ('\u{1000}', '\u{1049}'), ('\u{1050}', '\u{109d}'), - ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', '\u{10cd}'), ('\u{10d0}', - '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), ('\u{1250}', '\u{1256}'), - ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', '\u{1288}'), ('\u{128a}', - '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'), ('\u{12b8}', '\u{12be}'), - ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', '\u{12d6}'), ('\u{12d8}', - '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), ('\u{135d}', '\u{135f}'), - ('\u{1369}', '\u{1371}'), ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', - '\u{13fd}'), ('\u{1401}', '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), - ('\u{16a0}', '\u{16ea}'), ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', - '\u{1714}'), ('\u{1720}', '\u{1734}'), ('\u{1740}', '\u{1753}'), ('\u{1760}', '\u{176c}'), - ('\u{176e}', '\u{1770}'), ('\u{1772}', '\u{1773}'), ('\u{1780}', '\u{17d3}'), ('\u{17d7}', - '\u{17d7}'), ('\u{17dc}', '\u{17dd}'), ('\u{17e0}', '\u{17e9}'), ('\u{180b}', '\u{180d}'), - ('\u{1810}', '\u{1819}'), ('\u{1820}', '\u{1878}'), ('\u{1880}', '\u{18aa}'), ('\u{18b0}', - '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1920}', '\u{192b}'), ('\u{1930}', '\u{193b}'), - ('\u{1946}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', '\u{19ab}'), ('\u{19b0}', - '\u{19c9}'), ('\u{19d0}', '\u{19da}'), ('\u{1a00}', '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), ('\u{1a7f}', '\u{1a89}'), ('\u{1a90}', '\u{1a99}'), ('\u{1aa7}', - '\u{1aa7}'), ('\u{1ab0}', '\u{1abd}'), ('\u{1b00}', '\u{1b4b}'), ('\u{1b50}', '\u{1b59}'), - ('\u{1b6b}', '\u{1b73}'), ('\u{1b80}', '\u{1bf3}'), ('\u{1c00}', '\u{1c37}'), ('\u{1c40}', - '\u{1c49}'), ('\u{1c4d}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), 
('\u{1c90}', '\u{1cba}'), - ('\u{1cbd}', '\u{1cbf}'), ('\u{1cd0}', '\u{1cd2}'), ('\u{1cd4}', '\u{1cfa}'), ('\u{1d00}', - '\u{1df9}'), ('\u{1dfb}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), - ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', - '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), - ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', - '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), - ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{203f}', '\u{2040}'), ('\u{2054}', - '\u{2054}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', '\u{209c}'), - ('\u{20d0}', '\u{20dc}'), ('\u{20e1}', '\u{20e1}'), ('\u{20e5}', '\u{20f0}'), ('\u{2102}', - '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), ('\u{2115}', '\u{2115}'), - ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'), ('\u{2128}', - '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}', '\u{2149}'), - ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}', - '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), - ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', - '\u{2d6f}'), ('\u{2d7f}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), - ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', - '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{2de0}', '\u{2dff}'), - ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{302f}'), ('\u{3031}', '\u{3035}'), ('\u{3038}', - '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{3099}', '\u{309a}'), ('\u{309d}', '\u{309f}'), - ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312f}'), ('\u{3131}', - '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', '\u{4db5}'), - ('\u{4e00}', '\u{9fef}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), ('\u{a500}', - '\u{a60c}'), ('\u{a610}', '\u{a62b}'), ('\u{a640}', '\u{a66f}'), ('\u{a674}', '\u{a67d}'), - ('\u{a67f}', '\u{a6f1}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'), ('\u{a78b}', - '\u{a7bf}'), ('\u{a7c2}', '\u{a7c6}'), ('\u{a7f7}', '\u{a827}'), ('\u{a840}', '\u{a873}'), - ('\u{a880}', '\u{a8c5}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a8e0}', '\u{a8f7}'), ('\u{a8fb}', - '\u{a8fb}'), ('\u{a8fd}', '\u{a92d}'), ('\u{a930}', '\u{a953}'), ('\u{a960}', '\u{a97c}'), - ('\u{a980}', '\u{a9c0}'), ('\u{a9cf}', '\u{a9d9}'), ('\u{a9e0}', '\u{a9fe}'), ('\u{aa00}', - '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), ('\u{aa50}', '\u{aa59}'), ('\u{aa60}', '\u{aa76}'), - ('\u{aa7a}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'), ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}', - '\u{aaf6}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), - ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', - '\u{ab67}'), ('\u{ab70}', '\u{abea}'), ('\u{abec}', '\u{abed}'), ('\u{abf0}', '\u{abf9}'), - ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', - '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), - ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}', - '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', 
'\u{fbb1}'), - ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', - '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe00}', '\u{fe0f}'), ('\u{fe20}', '\u{fe2f}'), - ('\u{fe33}', '\u{fe34}'), ('\u{fe4d}', '\u{fe4f}'), ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', - '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), - ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff10}', '\u{ff19}'), ('\u{ff21}', - '\u{ff3a}'), ('\u{ff3f}', '\u{ff3f}'), ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'), - ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', - '\u{ffdc}'), ('\u{10000}', '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}', - '\u{1003a}'), ('\u{1003c}', '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}', - '\u{1005d}'), ('\u{10080}', '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{101fd}', - '\u{101fd}'), ('\u{10280}', '\u{1029c}'), ('\u{102a0}', '\u{102d0}'), ('\u{102e0}', - '\u{102e0}'), ('\u{10300}', '\u{1031f}'), ('\u{1032d}', '\u{1034a}'), ('\u{10350}', - '\u{1037a}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', - '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), ('\u{104a0}', - '\u{104a9}'), ('\u{104b0}', '\u{104d3}'), ('\u{104d8}', '\u{104fb}'), ('\u{10500}', - '\u{10527}'), ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', - '\u{10755}'), ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', - '\u{10808}'), ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', - '\u{1083c}'), ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', - '\u{1089e}'), ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', - '\u{10915}'), ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', - '\u{109bf}'), ('\u{10a00}', '\u{10a03}'), ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', - '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), ('\u{10a19}', '\u{10a35}'), ('\u{10a38}', - '\u{10a3a}'), ('\u{10a3f}', '\u{10a3f}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', - '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae6}'), ('\u{10b00}', - '\u{10b35}'), ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', - '\u{10b91}'), ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', - '\u{10cf2}'), ('\u{10d00}', '\u{10d27}'), ('\u{10d30}', '\u{10d39}'), ('\u{10f00}', - '\u{10f1c}'), ('\u{10f27}', '\u{10f27}'), ('\u{10f30}', '\u{10f50}'), ('\u{10fe0}', - '\u{10ff6}'), ('\u{11000}', '\u{11046}'), ('\u{11066}', '\u{1106f}'), ('\u{1107f}', - '\u{110ba}'), ('\u{110d0}', '\u{110e8}'), ('\u{110f0}', '\u{110f9}'), ('\u{11100}', - '\u{11134}'), ('\u{11136}', '\u{1113f}'), ('\u{11144}', '\u{11146}'), ('\u{11150}', - '\u{11173}'), ('\u{11176}', '\u{11176}'), ('\u{11180}', '\u{111c4}'), ('\u{111c9}', - '\u{111cc}'), ('\u{111d0}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'), ('\u{11200}', - '\u{11211}'), ('\u{11213}', '\u{11237}'), ('\u{1123e}', '\u{1123e}'), ('\u{11280}', - '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), ('\u{1128f}', - '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112ea}'), ('\u{112f0}', - '\u{112f9}'), ('\u{11300}', '\u{11303}'), ('\u{11305}', '\u{1130c}'), ('\u{1130f}', - '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'), ('\u{11332}', - '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133b}', '\u{11344}'), ('\u{11347}', - '\u{11348}'), ('\u{1134b}', '\u{1134d}'), 
('\u{11350}', '\u{11350}'), ('\u{11357}', - '\u{11357}'), ('\u{1135d}', '\u{11363}'), ('\u{11366}', '\u{1136c}'), ('\u{11370}', - '\u{11374}'), ('\u{11400}', '\u{1144a}'), ('\u{11450}', '\u{11459}'), ('\u{1145e}', - '\u{1145f}'), ('\u{11480}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{114d0}', - '\u{114d9}'), ('\u{11580}', '\u{115b5}'), ('\u{115b8}', '\u{115c0}'), ('\u{115d8}', - '\u{115dd}'), ('\u{11600}', '\u{11640}'), ('\u{11644}', '\u{11644}'), ('\u{11650}', - '\u{11659}'), ('\u{11680}', '\u{116b8}'), ('\u{116c0}', '\u{116c9}'), ('\u{11700}', - '\u{1171a}'), ('\u{1171d}', '\u{1172b}'), ('\u{11730}', '\u{11739}'), ('\u{11800}', - '\u{1183a}'), ('\u{118a0}', '\u{118e9}'), ('\u{118ff}', '\u{118ff}'), ('\u{119a0}', - '\u{119a7}'), ('\u{119aa}', '\u{119d7}'), ('\u{119da}', '\u{119e1}'), ('\u{119e3}', - '\u{119e4}'), ('\u{11a00}', '\u{11a3e}'), ('\u{11a47}', '\u{11a47}'), ('\u{11a50}', - '\u{11a99}'), ('\u{11a9d}', '\u{11a9d}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', - '\u{11c08}'), ('\u{11c0a}', '\u{11c36}'), ('\u{11c38}', '\u{11c40}'), ('\u{11c50}', - '\u{11c59}'), ('\u{11c72}', '\u{11c8f}'), ('\u{11c92}', '\u{11ca7}'), ('\u{11ca9}', - '\u{11cb6}'), ('\u{11d00}', '\u{11d06}'), ('\u{11d08}', '\u{11d09}'), ('\u{11d0b}', - '\u{11d36}'), ('\u{11d3a}', '\u{11d3a}'), ('\u{11d3c}', '\u{11d3d}'), ('\u{11d3f}', - '\u{11d47}'), ('\u{11d50}', '\u{11d59}'), ('\u{11d60}', '\u{11d65}'), ('\u{11d67}', - '\u{11d68}'), ('\u{11d6a}', '\u{11d8e}'), ('\u{11d90}', '\u{11d91}'), ('\u{11d93}', - '\u{11d98}'), ('\u{11da0}', '\u{11da9}'), ('\u{11ee0}', '\u{11ef6}'), ('\u{12000}', - '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'), ('\u{13000}', - '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'), ('\u{16a40}', - '\u{16a5e}'), ('\u{16a60}', '\u{16a69}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16af0}', - '\u{16af4}'), ('\u{16b00}', '\u{16b36}'), ('\u{16b40}', '\u{16b43}'), ('\u{16b50}', - '\u{16b59}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'), ('\u{16e40}', - '\u{16e7f}'), ('\u{16f00}', '\u{16f4a}'), ('\u{16f4f}', '\u{16f87}'), ('\u{16f8f}', - '\u{16f9f}'), ('\u{16fe0}', '\u{16fe1}'), ('\u{16fe3}', '\u{16fe3}'), ('\u{17000}', - '\u{187f7}'), ('\u{18800}', '\u{18af2}'), ('\u{1b000}', '\u{1b11e}'), ('\u{1b150}', - '\u{1b152}'), ('\u{1b164}', '\u{1b167}'), ('\u{1b170}', '\u{1b2fb}'), ('\u{1bc00}', - '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', - '\u{1bc99}'), ('\u{1bc9d}', '\u{1bc9e}'), ('\u{1d165}', '\u{1d169}'), ('\u{1d16d}', - '\u{1d172}'), ('\u{1d17b}', '\u{1d182}'), ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}', - '\u{1d1ad}'), ('\u{1d242}', '\u{1d244}'), ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', - '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', - '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', - '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', - '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', - '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', - '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', - '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', - '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', - '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', - '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1d7ce}', '\u{1d7ff}'), 
('\u{1da00}', - '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'), ('\u{1da75}', '\u{1da75}'), ('\u{1da84}', - '\u{1da84}'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}'), ('\u{1e000}', - '\u{1e006}'), ('\u{1e008}', '\u{1e018}'), ('\u{1e01b}', '\u{1e021}'), ('\u{1e023}', - '\u{1e024}'), ('\u{1e026}', '\u{1e02a}'), ('\u{1e100}', '\u{1e12c}'), ('\u{1e130}', - '\u{1e13d}'), ('\u{1e140}', '\u{1e149}'), ('\u{1e14e}', '\u{1e14e}'), ('\u{1e2c0}', - '\u{1e2f9}'), ('\u{1e800}', '\u{1e8c4}'), ('\u{1e8d0}', '\u{1e8d6}'), ('\u{1e900}', - '\u{1e94b}'), ('\u{1e950}', '\u{1e959}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', - '\u{1ee1f}'), ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', - '\u{1ee27}'), ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', - '\u{1ee39}'), ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', - '\u{1ee47}'), ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', - '\u{1ee4f}'), ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', - '\u{1ee57}'), ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', - '\u{1ee5d}'), ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', - '\u{1ee64}'), ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', - '\u{1ee77}'), ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', - '\u{1ee89}'), ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', - '\u{1eea9}'), ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', - '\u{2b734}'), ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2ceb0}', - '\u{2ebe0}'), ('\u{2f800}', '\u{2fa1d}'), ('\u{e0100}', '\u{e01ef}') + ('\u{30}', '\u{39}'), + ('\u{41}', '\u{5a}'), + ('\u{5f}', '\u{5f}'), + ('\u{61}', '\u{7a}'), + ('\u{aa}', '\u{aa}'), + ('\u{b5}', '\u{b5}'), + ('\u{b7}', '\u{b7}'), + ('\u{ba}', '\u{ba}'), + ('\u{c0}', '\u{d6}'), + ('\u{d8}', '\u{f6}'), + ('\u{f8}', '\u{2c1}'), + ('\u{2c6}', '\u{2d1}'), + ('\u{2e0}', '\u{2e4}'), + ('\u{2ec}', '\u{2ec}'), + ('\u{2ee}', '\u{2ee}'), + ('\u{300}', '\u{374}'), + ('\u{376}', '\u{377}'), + ('\u{37b}', '\u{37d}'), + ('\u{37f}', '\u{37f}'), + ('\u{386}', '\u{38a}'), + ('\u{38c}', '\u{38c}'), + ('\u{38e}', '\u{3a1}'), + ('\u{3a3}', '\u{3f5}'), + ('\u{3f7}', '\u{481}'), + ('\u{483}', '\u{487}'), + ('\u{48a}', '\u{52f}'), + ('\u{531}', '\u{556}'), + ('\u{559}', '\u{559}'), + ('\u{560}', '\u{588}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{5d0}', '\u{5ea}'), + ('\u{5ef}', '\u{5f2}'), + ('\u{610}', '\u{61a}'), + ('\u{620}', '\u{669}'), + ('\u{66e}', '\u{6d3}'), + ('\u{6d5}', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', '\u{6fc}'), + ('\u{6ff}', '\u{6ff}'), + ('\u{710}', '\u{74a}'), + ('\u{74d}', '\u{7b1}'), + ('\u{7c0}', '\u{7f5}'), + ('\u{7fa}', '\u{7fa}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{800}', '\u{82d}'), + ('\u{840}', '\u{85b}'), + ('\u{860}', '\u{86a}'), + ('\u{8a0}', '\u{8b4}'), + ('\u{8b6}', '\u{8c7}'), + ('\u{8d3}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('\u{966}', '\u{96f}'), + ('\u{971}', '\u{983}'), + ('\u{985}', '\u{98c}'), + ('\u{98f}', '\u{990}'), + ('\u{993}', '\u{9a8}'), + ('\u{9aa}', '\u{9b0}'), + ('\u{9b2}', '\u{9b2}'), + ('\u{9b6}', '\u{9b9}'), + ('\u{9bc}', '\u{9c4}'), + ('\u{9c7}', '\u{9c8}'), + ('\u{9cb}', '\u{9ce}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9dc}', '\u{9dd}'), + ('\u{9df}', '\u{9e3}'), + ('\u{9e6}', '\u{9f1}'), + ('\u{9fc}', '\u{9fc}'), + 
('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a03}'), + ('\u{a05}', '\u{a0a}'), + ('\u{a0f}', '\u{a10}'), + ('\u{a13}', '\u{a28}'), + ('\u{a2a}', '\u{a30}'), + ('\u{a32}', '\u{a33}'), + ('\u{a35}', '\u{a36}'), + ('\u{a38}', '\u{a39}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a3e}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a59}', '\u{a5c}'), + ('\u{a5e}', '\u{a5e}'), + ('\u{a66}', '\u{a75}'), + ('\u{a81}', '\u{a83}'), + ('\u{a85}', '\u{a8d}'), + ('\u{a8f}', '\u{a91}'), + ('\u{a93}', '\u{aa8}'), + ('\u{aaa}', '\u{ab0}'), + ('\u{ab2}', '\u{ab3}'), + ('\u{ab5}', '\u{ab9}'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', '\u{ac9}'), + ('\u{acb}', '\u{acd}'), + ('\u{ad0}', '\u{ad0}'), + ('\u{ae0}', '\u{ae3}'), + ('\u{ae6}', '\u{aef}'), + ('\u{af9}', '\u{aff}'), + ('\u{b01}', '\u{b03}'), + ('\u{b05}', '\u{b0c}'), + ('\u{b0f}', '\u{b10}'), + ('\u{b13}', '\u{b28}'), + ('\u{b2a}', '\u{b30}'), + ('\u{b32}', '\u{b33}'), + ('\u{b35}', '\u{b39}'), + ('\u{b3c}', '\u{b44}'), + ('\u{b47}', '\u{b48}'), + ('\u{b4b}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b5c}', '\u{b5d}'), + ('\u{b5f}', '\u{b63}'), + ('\u{b66}', '\u{b6f}'), + ('\u{b71}', '\u{b71}'), + ('\u{b82}', '\u{b83}'), + ('\u{b85}', '\u{b8a}'), + ('\u{b8e}', '\u{b90}'), + ('\u{b92}', '\u{b95}'), + ('\u{b99}', '\u{b9a}'), + ('\u{b9c}', '\u{b9c}'), + ('\u{b9e}', '\u{b9f}'), + ('\u{ba3}', '\u{ba4}'), + ('\u{ba8}', '\u{baa}'), + ('\u{bae}', '\u{bb9}'), + ('\u{bbe}', '\u{bc2}'), + ('\u{bc6}', '\u{bc8}'), + ('\u{bca}', '\u{bcd}'), + ('\u{bd0}', '\u{bd0}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{be6}', '\u{bef}'), + ('\u{c00}', '\u{c0c}'), + ('\u{c0e}', '\u{c10}'), + ('\u{c12}', '\u{c28}'), + ('\u{c2a}', '\u{c39}'), + ('\u{c3d}', '\u{c44}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c58}', '\u{c5a}'), + ('\u{c60}', '\u{c63}'), + ('\u{c66}', '\u{c6f}'), + ('\u{c80}', '\u{c83}'), + ('\u{c85}', '\u{c8c}'), + ('\u{c8e}', '\u{c90}'), + ('\u{c92}', '\u{ca8}'), + ('\u{caa}', '\u{cb3}'), + ('\u{cb5}', '\u{cb9}'), + ('\u{cbc}', '\u{cc4}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{cde}', '\u{cde}'), + ('\u{ce0}', '\u{ce3}'), + ('\u{ce6}', '\u{cef}'), + ('\u{cf1}', '\u{cf2}'), + ('\u{d00}', '\u{d0c}'), + ('\u{d0e}', '\u{d10}'), + ('\u{d12}', '\u{d44}'), + ('\u{d46}', '\u{d48}'), + ('\u{d4a}', '\u{d4e}'), + ('\u{d54}', '\u{d57}'), + ('\u{d5f}', '\u{d63}'), + ('\u{d66}', '\u{d6f}'), + ('\u{d7a}', '\u{d7f}'), + ('\u{d81}', '\u{d83}'), + ('\u{d85}', '\u{d96}'), + ('\u{d9a}', '\u{db1}'), + ('\u{db3}', '\u{dbb}'), + ('\u{dbd}', '\u{dbd}'), + ('\u{dc0}', '\u{dc6}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{dd8}', '\u{ddf}'), + ('\u{de6}', '\u{def}'), + ('\u{df2}', '\u{df3}'), + ('\u{e01}', '\u{e3a}'), + ('\u{e40}', '\u{e4e}'), + ('\u{e50}', '\u{e59}'), + ('\u{e81}', '\u{e82}'), + ('\u{e84}', '\u{e84}'), + ('\u{e86}', '\u{e8a}'), + ('\u{e8c}', '\u{ea3}'), + ('\u{ea5}', '\u{ea5}'), + ('\u{ea7}', '\u{ebd}'), + ('\u{ec0}', '\u{ec4}'), + ('\u{ec6}', '\u{ec6}'), + ('\u{ec8}', '\u{ecd}'), + ('\u{ed0}', '\u{ed9}'), + ('\u{edc}', '\u{edf}'), + ('\u{f00}', '\u{f00}'), + ('\u{f18}', '\u{f19}'), + ('\u{f20}', '\u{f29}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f3e}', '\u{f47}'), + ('\u{f49}', '\u{f6c}'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{1000}', '\u{1049}'), + ('\u{1050}', '\u{109d}'), + ('\u{10a0}', 
'\u{10c5}'), + ('\u{10c7}', '\u{10c7}'), + ('\u{10cd}', '\u{10cd}'), + ('\u{10d0}', '\u{10fa}'), + ('\u{10fc}', '\u{1248}'), + ('\u{124a}', '\u{124d}'), + ('\u{1250}', '\u{1256}'), + ('\u{1258}', '\u{1258}'), + ('\u{125a}', '\u{125d}'), + ('\u{1260}', '\u{1288}'), + ('\u{128a}', '\u{128d}'), + ('\u{1290}', '\u{12b0}'), + ('\u{12b2}', '\u{12b5}'), + ('\u{12b8}', '\u{12be}'), + ('\u{12c0}', '\u{12c0}'), + ('\u{12c2}', '\u{12c5}'), + ('\u{12c8}', '\u{12d6}'), + ('\u{12d8}', '\u{1310}'), + ('\u{1312}', '\u{1315}'), + ('\u{1318}', '\u{135a}'), + ('\u{135d}', '\u{135f}'), + ('\u{1369}', '\u{1371}'), + ('\u{1380}', '\u{138f}'), + ('\u{13a0}', '\u{13f5}'), + ('\u{13f8}', '\u{13fd}'), + ('\u{1401}', '\u{166c}'), + ('\u{166f}', '\u{167f}'), + ('\u{1681}', '\u{169a}'), + ('\u{16a0}', '\u{16ea}'), + ('\u{16ee}', '\u{16f8}'), + ('\u{1700}', '\u{170c}'), + ('\u{170e}', '\u{1714}'), + ('\u{1720}', '\u{1734}'), + ('\u{1740}', '\u{1753}'), + ('\u{1760}', '\u{176c}'), + ('\u{176e}', '\u{1770}'), + ('\u{1772}', '\u{1773}'), + ('\u{1780}', '\u{17d3}'), + ('\u{17d7}', '\u{17d7}'), + ('\u{17dc}', '\u{17dd}'), + ('\u{17e0}', '\u{17e9}'), + ('\u{180b}', '\u{180d}'), + ('\u{1810}', '\u{1819}'), + ('\u{1820}', '\u{1878}'), + ('\u{1880}', '\u{18aa}'), + ('\u{18b0}', '\u{18f5}'), + ('\u{1900}', '\u{191e}'), + ('\u{1920}', '\u{192b}'), + ('\u{1930}', '\u{193b}'), + ('\u{1946}', '\u{196d}'), + ('\u{1970}', '\u{1974}'), + ('\u{1980}', '\u{19ab}'), + ('\u{19b0}', '\u{19c9}'), + ('\u{19d0}', '\u{19da}'), + ('\u{1a00}', '\u{1a1b}'), + ('\u{1a20}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a89}'), + ('\u{1a90}', '\u{1a99}'), + ('\u{1aa7}', '\u{1aa7}'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ac0}'), + ('\u{1b00}', '\u{1b4b}'), + ('\u{1b50}', '\u{1b59}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('\u{1c00}', '\u{1c37}'), + ('\u{1c40}', '\u{1c49}'), + ('\u{1c4d}', '\u{1c7d}'), + ('\u{1c80}', '\u{1c88}'), + ('\u{1c90}', '\u{1cba}'), + ('\u{1cbd}', '\u{1cbf}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1cfa}'), + ('\u{1d00}', '\u{1df9}'), + ('\u{1dfb}', '\u{1f15}'), + ('\u{1f18}', '\u{1f1d}'), + ('\u{1f20}', '\u{1f45}'), + ('\u{1f48}', '\u{1f4d}'), + ('\u{1f50}', '\u{1f57}'), + ('\u{1f59}', '\u{1f59}'), + ('\u{1f5b}', '\u{1f5b}'), + ('\u{1f5d}', '\u{1f5d}'), + ('\u{1f5f}', '\u{1f7d}'), + ('\u{1f80}', '\u{1fb4}'), + ('\u{1fb6}', '\u{1fbc}'), + ('\u{1fbe}', '\u{1fbe}'), + ('\u{1fc2}', '\u{1fc4}'), + ('\u{1fc6}', '\u{1fcc}'), + ('\u{1fd0}', '\u{1fd3}'), + ('\u{1fd6}', '\u{1fdb}'), + ('\u{1fe0}', '\u{1fec}'), + ('\u{1ff2}', '\u{1ff4}'), + ('\u{1ff6}', '\u{1ffc}'), + ('\u{203f}', '\u{2040}'), + ('\u{2054}', '\u{2054}'), + ('\u{2071}', '\u{2071}'), + ('\u{207f}', '\u{207f}'), + ('\u{2090}', '\u{209c}'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('\u{2102}', '\u{2102}'), + ('\u{2107}', '\u{2107}'), + ('\u{210a}', '\u{2113}'), + ('\u{2115}', '\u{2115}'), + ('\u{2118}', '\u{211d}'), + ('\u{2124}', '\u{2124}'), + ('\u{2126}', '\u{2126}'), + ('\u{2128}', '\u{2128}'), + ('\u{212a}', '\u{2139}'), + ('\u{213c}', '\u{213f}'), + ('\u{2145}', '\u{2149}'), + ('\u{214e}', '\u{214e}'), + ('\u{2160}', '\u{2188}'), + ('\u{2c00}', '\u{2c2e}'), + ('\u{2c30}', '\u{2c5e}'), + ('\u{2c60}', '\u{2ce4}'), + ('\u{2ceb}', '\u{2cf3}'), + ('\u{2d00}', '\u{2d25}'), + ('\u{2d27}', '\u{2d27}'), + ('\u{2d2d}', '\u{2d2d}'), + ('\u{2d30}', '\u{2d67}'), + ('\u{2d6f}', '\u{2d6f}'), + ('\u{2d7f}', '\u{2d96}'), + ('\u{2da0}', '\u{2da6}'), + ('\u{2da8}', '\u{2dae}'), + 
('\u{2db0}', '\u{2db6}'), + ('\u{2db8}', '\u{2dbe}'), + ('\u{2dc0}', '\u{2dc6}'), + ('\u{2dc8}', '\u{2dce}'), + ('\u{2dd0}', '\u{2dd6}'), + ('\u{2dd8}', '\u{2dde}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{3005}', '\u{3007}'), + ('\u{3021}', '\u{302f}'), + ('\u{3031}', '\u{3035}'), + ('\u{3038}', '\u{303c}'), + ('\u{3041}', '\u{3096}'), + ('\u{3099}', '\u{309a}'), + ('\u{309d}', '\u{309f}'), + ('\u{30a1}', '\u{30fa}'), + ('\u{30fc}', '\u{30ff}'), + ('\u{3105}', '\u{312f}'), + ('\u{3131}', '\u{318e}'), + ('\u{31a0}', '\u{31bf}'), + ('\u{31f0}', '\u{31ff}'), + ('\u{3400}', '\u{4dbf}'), + ('\u{4e00}', '\u{9ffc}'), + ('\u{a000}', '\u{a48c}'), + ('\u{a4d0}', '\u{a4fd}'), + ('\u{a500}', '\u{a60c}'), + ('\u{a610}', '\u{a62b}'), + ('\u{a640}', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a67f}', '\u{a6f1}'), + ('\u{a717}', '\u{a71f}'), + ('\u{a722}', '\u{a788}'), + ('\u{a78b}', '\u{a7bf}'), + ('\u{a7c2}', '\u{a7ca}'), + ('\u{a7f5}', '\u{a827}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a840}', '\u{a873}'), + ('\u{a880}', '\u{a8c5}'), + ('\u{a8d0}', '\u{a8d9}'), + ('\u{a8e0}', '\u{a8f7}'), + ('\u{a8fb}', '\u{a8fb}'), + ('\u{a8fd}', '\u{a92d}'), + ('\u{a930}', '\u{a953}'), + ('\u{a960}', '\u{a97c}'), + ('\u{a980}', '\u{a9c0}'), + ('\u{a9cf}', '\u{a9d9}'), + ('\u{a9e0}', '\u{a9fe}'), + ('\u{aa00}', '\u{aa36}'), + ('\u{aa40}', '\u{aa4d}'), + ('\u{aa50}', '\u{aa59}'), + ('\u{aa60}', '\u{aa76}'), + ('\u{aa7a}', '\u{aac2}'), + ('\u{aadb}', '\u{aadd}'), + ('\u{aae0}', '\u{aaef}'), + ('\u{aaf2}', '\u{aaf6}'), + ('\u{ab01}', '\u{ab06}'), + ('\u{ab09}', '\u{ab0e}'), + ('\u{ab11}', '\u{ab16}'), + ('\u{ab20}', '\u{ab26}'), + ('\u{ab28}', '\u{ab2e}'), + ('\u{ab30}', '\u{ab5a}'), + ('\u{ab5c}', '\u{ab69}'), + ('\u{ab70}', '\u{abea}'), + ('\u{abec}', '\u{abed}'), + ('\u{abf0}', '\u{abf9}'), + ('\u{ac00}', '\u{d7a3}'), + ('\u{d7b0}', '\u{d7c6}'), + ('\u{d7cb}', '\u{d7fb}'), + ('\u{f900}', '\u{fa6d}'), + ('\u{fa70}', '\u{fad9}'), + ('\u{fb00}', '\u{fb06}'), + ('\u{fb13}', '\u{fb17}'), + ('\u{fb1d}', '\u{fb28}'), + ('\u{fb2a}', '\u{fb36}'), + ('\u{fb38}', '\u{fb3c}'), + ('\u{fb3e}', '\u{fb3e}'), + ('\u{fb40}', '\u{fb41}'), + ('\u{fb43}', '\u{fb44}'), + ('\u{fb46}', '\u{fbb1}'), + ('\u{fbd3}', '\u{fc5d}'), + ('\u{fc64}', '\u{fd3d}'), + ('\u{fd50}', '\u{fd8f}'), + ('\u{fd92}', '\u{fdc7}'), + ('\u{fdf0}', '\u{fdf9}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{fe33}', '\u{fe34}'), + ('\u{fe4d}', '\u{fe4f}'), + ('\u{fe71}', '\u{fe71}'), + ('\u{fe73}', '\u{fe73}'), + ('\u{fe77}', '\u{fe77}'), + ('\u{fe79}', '\u{fe79}'), + ('\u{fe7b}', '\u{fe7b}'), + ('\u{fe7d}', '\u{fe7d}'), + ('\u{fe7f}', '\u{fefc}'), + ('\u{ff10}', '\u{ff19}'), + ('\u{ff21}', '\u{ff3a}'), + ('\u{ff3f}', '\u{ff3f}'), + ('\u{ff41}', '\u{ff5a}'), + ('\u{ff66}', '\u{ffbe}'), + ('\u{ffc2}', '\u{ffc7}'), + ('\u{ffca}', '\u{ffcf}'), + ('\u{ffd2}', '\u{ffd7}'), + ('\u{ffda}', '\u{ffdc}'), + ('\u{10000}', '\u{1000b}'), + ('\u{1000d}', '\u{10026}'), + ('\u{10028}', '\u{1003a}'), + ('\u{1003c}', '\u{1003d}'), + ('\u{1003f}', '\u{1004d}'), + ('\u{10050}', '\u{1005d}'), + ('\u{10080}', '\u{100fa}'), + ('\u{10140}', '\u{10174}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{10280}', '\u{1029c}'), + ('\u{102a0}', '\u{102d0}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10300}', '\u{1031f}'), + ('\u{1032d}', '\u{1034a}'), + ('\u{10350}', '\u{1037a}'), + ('\u{10380}', '\u{1039d}'), + ('\u{103a0}', '\u{103c3}'), + ('\u{103c8}', '\u{103cf}'), + ('\u{103d1}', '\u{103d5}'), + ('\u{10400}', '\u{1049d}'), + ('\u{104a0}', '\u{104a9}'), + ('\u{104b0}', '\u{104d3}'), + 
('\u{104d8}', '\u{104fb}'), + ('\u{10500}', '\u{10527}'), + ('\u{10530}', '\u{10563}'), + ('\u{10600}', '\u{10736}'), + ('\u{10740}', '\u{10755}'), + ('\u{10760}', '\u{10767}'), + ('\u{10800}', '\u{10805}'), + ('\u{10808}', '\u{10808}'), + ('\u{1080a}', '\u{10835}'), + ('\u{10837}', '\u{10838}'), + ('\u{1083c}', '\u{1083c}'), + ('\u{1083f}', '\u{10855}'), + ('\u{10860}', '\u{10876}'), + ('\u{10880}', '\u{1089e}'), + ('\u{108e0}', '\u{108f2}'), + ('\u{108f4}', '\u{108f5}'), + ('\u{10900}', '\u{10915}'), + ('\u{10920}', '\u{10939}'), + ('\u{10980}', '\u{109b7}'), + ('\u{109be}', '\u{109bf}'), + ('\u{10a00}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a13}'), + ('\u{10a15}', '\u{10a17}'), + ('\u{10a19}', '\u{10a35}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10a60}', '\u{10a7c}'), + ('\u{10a80}', '\u{10a9c}'), + ('\u{10ac0}', '\u{10ac7}'), + ('\u{10ac9}', '\u{10ae6}'), + ('\u{10b00}', '\u{10b35}'), + ('\u{10b40}', '\u{10b55}'), + ('\u{10b60}', '\u{10b72}'), + ('\u{10b80}', '\u{10b91}'), + ('\u{10c00}', '\u{10c48}'), + ('\u{10c80}', '\u{10cb2}'), + ('\u{10cc0}', '\u{10cf2}'), + ('\u{10d00}', '\u{10d27}'), + ('\u{10d30}', '\u{10d39}'), + ('\u{10e80}', '\u{10ea9}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10eb0}', '\u{10eb1}'), + ('\u{10f00}', '\u{10f1c}'), + ('\u{10f27}', '\u{10f27}'), + ('\u{10f30}', '\u{10f50}'), + ('\u{10fb0}', '\u{10fc4}'), + ('\u{10fe0}', '\u{10ff6}'), + ('\u{11000}', '\u{11046}'), + ('\u{11066}', '\u{1106f}'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110d0}', '\u{110e8}'), + ('\u{110f0}', '\u{110f9}'), + ('\u{11100}', '\u{11134}'), + ('\u{11136}', '\u{1113f}'), + ('\u{11144}', '\u{11147}'), + ('\u{11150}', '\u{11173}'), + ('\u{11176}', '\u{11176}'), + ('\u{11180}', '\u{111c4}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111ce}', '\u{111da}'), + ('\u{111dc}', '\u{111dc}'), + ('\u{11200}', '\u{11211}'), + ('\u{11213}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11280}', '\u{11286}'), + ('\u{11288}', '\u{11288}'), + ('\u{1128a}', '\u{1128d}'), + ('\u{1128f}', '\u{1129d}'), + ('\u{1129f}', '\u{112a8}'), + ('\u{112b0}', '\u{112ea}'), + ('\u{112f0}', '\u{112f9}'), + ('\u{11300}', '\u{11303}'), + ('\u{11305}', '\u{1130c}'), + ('\u{1130f}', '\u{11310}'), + ('\u{11313}', '\u{11328}'), + ('\u{1132a}', '\u{11330}'), + ('\u{11332}', '\u{11333}'), + ('\u{11335}', '\u{11339}'), + ('\u{1133b}', '\u{11344}'), + ('\u{11347}', '\u{11348}'), + ('\u{1134b}', '\u{1134d}'), + ('\u{11350}', '\u{11350}'), + ('\u{11357}', '\u{11357}'), + ('\u{1135d}', '\u{11363}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{11400}', '\u{1144a}'), + ('\u{11450}', '\u{11459}'), + ('\u{1145e}', '\u{11461}'), + ('\u{11480}', '\u{114c5}'), + ('\u{114c7}', '\u{114c7}'), + ('\u{114d0}', '\u{114d9}'), + ('\u{11580}', '\u{115b5}'), + ('\u{115b8}', '\u{115c0}'), + ('\u{115d8}', '\u{115dd}'), + ('\u{11600}', '\u{11640}'), + ('\u{11644}', '\u{11644}'), + ('\u{11650}', '\u{11659}'), + ('\u{11680}', '\u{116b8}'), + ('\u{116c0}', '\u{116c9}'), + ('\u{11700}', '\u{1171a}'), + ('\u{1171d}', '\u{1172b}'), + ('\u{11730}', '\u{11739}'), + ('\u{11800}', '\u{1183a}'), + ('\u{118a0}', '\u{118e9}'), + ('\u{118ff}', '\u{11906}'), + ('\u{11909}', '\u{11909}'), + ('\u{1190c}', '\u{11913}'), + ('\u{11915}', '\u{11916}'), + ('\u{11918}', '\u{11935}'), + ('\u{11937}', '\u{11938}'), + ('\u{1193b}', '\u{11943}'), + ('\u{11950}', '\u{11959}'), + ('\u{119a0}', '\u{119a7}'), + ('\u{119aa}', '\u{119d7}'), + ('\u{119da}', '\u{119e1}'), + ('\u{119e3}', '\u{119e4}'), + ('\u{11a00}', 
'\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a50}', '\u{11a99}'), + ('\u{11a9d}', '\u{11a9d}'), + ('\u{11ac0}', '\u{11af8}'), + ('\u{11c00}', '\u{11c08}'), + ('\u{11c0a}', '\u{11c36}'), + ('\u{11c38}', '\u{11c40}'), + ('\u{11c50}', '\u{11c59}'), + ('\u{11c72}', '\u{11c8f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11ca9}', '\u{11cb6}'), + ('\u{11d00}', '\u{11d06}'), + ('\u{11d08}', '\u{11d09}'), + ('\u{11d0b}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('\u{11d50}', '\u{11d59}'), + ('\u{11d60}', '\u{11d65}'), + ('\u{11d67}', '\u{11d68}'), + ('\u{11d6a}', '\u{11d8e}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d93}', '\u{11d98}'), + ('\u{11da0}', '\u{11da9}'), + ('\u{11ee0}', '\u{11ef6}'), + ('\u{11fb0}', '\u{11fb0}'), + ('\u{12000}', '\u{12399}'), + ('\u{12400}', '\u{1246e}'), + ('\u{12480}', '\u{12543}'), + ('\u{13000}', '\u{1342e}'), + ('\u{14400}', '\u{14646}'), + ('\u{16800}', '\u{16a38}'), + ('\u{16a40}', '\u{16a5e}'), + ('\u{16a60}', '\u{16a69}'), + ('\u{16ad0}', '\u{16aed}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b00}', '\u{16b36}'), + ('\u{16b40}', '\u{16b43}'), + ('\u{16b50}', '\u{16b59}'), + ('\u{16b63}', '\u{16b77}'), + ('\u{16b7d}', '\u{16b8f}'), + ('\u{16e40}', '\u{16e7f}'), + ('\u{16f00}', '\u{16f4a}'), + ('\u{16f4f}', '\u{16f87}'), + ('\u{16f8f}', '\u{16f9f}'), + ('\u{16fe0}', '\u{16fe1}'), + ('\u{16fe3}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{17000}', '\u{187f7}'), + ('\u{18800}', '\u{18cd5}'), + ('\u{18d00}', '\u{18d08}'), + ('\u{1b000}', '\u{1b11e}'), + ('\u{1b150}', '\u{1b152}'), + ('\u{1b164}', '\u{1b167}'), + ('\u{1b170}', '\u{1b2fb}'), + ('\u{1bc00}', '\u{1bc6a}'), + ('\u{1bc70}', '\u{1bc7c}'), + ('\u{1bc80}', '\u{1bc88}'), + ('\u{1bc90}', '\u{1bc99}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1d400}', '\u{1d454}'), + ('\u{1d456}', '\u{1d49c}'), + ('\u{1d49e}', '\u{1d49f}'), + ('\u{1d4a2}', '\u{1d4a2}'), + ('\u{1d4a5}', '\u{1d4a6}'), + ('\u{1d4a9}', '\u{1d4ac}'), + ('\u{1d4ae}', '\u{1d4b9}'), + ('\u{1d4bb}', '\u{1d4bb}'), + ('\u{1d4bd}', '\u{1d4c3}'), + ('\u{1d4c5}', '\u{1d505}'), + ('\u{1d507}', '\u{1d50a}'), + ('\u{1d50d}', '\u{1d514}'), + ('\u{1d516}', '\u{1d51c}'), + ('\u{1d51e}', '\u{1d539}'), + ('\u{1d53b}', '\u{1d53e}'), + ('\u{1d540}', '\u{1d544}'), + ('\u{1d546}', '\u{1d546}'), + ('\u{1d54a}', '\u{1d550}'), + ('\u{1d552}', '\u{1d6a5}'), + ('\u{1d6a8}', '\u{1d6c0}'), + ('\u{1d6c2}', '\u{1d6da}'), + ('\u{1d6dc}', '\u{1d6fa}'), + ('\u{1d6fc}', '\u{1d714}'), + ('\u{1d716}', '\u{1d734}'), + ('\u{1d736}', '\u{1d74e}'), + ('\u{1d750}', '\u{1d76e}'), + ('\u{1d770}', '\u{1d788}'), + ('\u{1d78a}', '\u{1d7a8}'), + ('\u{1d7aa}', '\u{1d7c2}'), + ('\u{1d7c4}', '\u{1d7cb}'), + ('\u{1d7ce}', '\u{1d7ff}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e100}', '\u{1e12c}'), + ('\u{1e130}', '\u{1e13d}'), + ('\u{1e140}', '\u{1e149}'), + ('\u{1e14e}', '\u{1e14e}'), + ('\u{1e2c0}', '\u{1e2f9}'), + ('\u{1e800}', '\u{1e8c4}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e900}', '\u{1e94b}'), + ('\u{1e950}', '\u{1e959}'), + ('\u{1ee00}', '\u{1ee03}'), 
+ ('\u{1ee05}', '\u{1ee1f}'), + ('\u{1ee21}', '\u{1ee22}'), + ('\u{1ee24}', '\u{1ee24}'), + ('\u{1ee27}', '\u{1ee27}'), + ('\u{1ee29}', '\u{1ee32}'), + ('\u{1ee34}', '\u{1ee37}'), + ('\u{1ee39}', '\u{1ee39}'), + ('\u{1ee3b}', '\u{1ee3b}'), + ('\u{1ee42}', '\u{1ee42}'), + ('\u{1ee47}', '\u{1ee47}'), + ('\u{1ee49}', '\u{1ee49}'), + ('\u{1ee4b}', '\u{1ee4b}'), + ('\u{1ee4d}', '\u{1ee4f}'), + ('\u{1ee51}', '\u{1ee52}'), + ('\u{1ee54}', '\u{1ee54}'), + ('\u{1ee57}', '\u{1ee57}'), + ('\u{1ee59}', '\u{1ee59}'), + ('\u{1ee5b}', '\u{1ee5b}'), + ('\u{1ee5d}', '\u{1ee5d}'), + ('\u{1ee5f}', '\u{1ee5f}'), + ('\u{1ee61}', '\u{1ee62}'), + ('\u{1ee64}', '\u{1ee64}'), + ('\u{1ee67}', '\u{1ee6a}'), + ('\u{1ee6c}', '\u{1ee72}'), + ('\u{1ee74}', '\u{1ee77}'), + ('\u{1ee79}', '\u{1ee7c}'), + ('\u{1ee7e}', '\u{1ee7e}'), + ('\u{1ee80}', '\u{1ee89}'), + ('\u{1ee8b}', '\u{1ee9b}'), + ('\u{1eea1}', '\u{1eea3}'), + ('\u{1eea5}', '\u{1eea9}'), + ('\u{1eeab}', '\u{1eebb}'), + ('\u{1fbf0}', '\u{1fbf9}'), + ('\u{20000}', '\u{2a6dd}'), + ('\u{2a700}', '\u{2b734}'), + ('\u{2b740}', '\u{2b81d}'), + ('\u{2b820}', '\u{2cea1}'), + ('\u{2ceb0}', '\u{2ebe0}'), + ('\u{2f800}', '\u{2fa1d}'), + ('\u{30000}', '\u{3134a}'), + ('\u{e0100}', '\u{e01ef}'), ]; pub fn XID_Continue(c: char) -> bool { @@ -254,198 +782,638 @@ } pub const XID_Start_table: &[(char, char)] = &[ - ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), - ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), - ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', - '\u{2ee}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), - ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}', - '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'), - ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{560}', - '\u{588}'), ('\u{5d0}', '\u{5ea}'), ('\u{5ef}', '\u{5f2}'), ('\u{620}', '\u{64a}'), - ('\u{66e}', '\u{66f}'), ('\u{671}', '\u{6d3}'), ('\u{6d5}', '\u{6d5}'), ('\u{6e5}', - '\u{6e6}'), ('\u{6ee}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), - ('\u{710}', '\u{710}'), ('\u{712}', '\u{72f}'), ('\u{74d}', '\u{7a5}'), ('\u{7b1}', - '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), - ('\u{800}', '\u{815}'), ('\u{81a}', '\u{81a}'), ('\u{824}', '\u{824}'), ('\u{828}', - '\u{828}'), ('\u{840}', '\u{858}'), ('\u{860}', '\u{86a}'), ('\u{8a0}', '\u{8b4}'), - ('\u{8b6}', '\u{8bd}'), ('\u{904}', '\u{939}'), ('\u{93d}', '\u{93d}'), ('\u{950}', - '\u{950}'), ('\u{958}', '\u{961}'), ('\u{971}', '\u{980}'), ('\u{985}', '\u{98c}'), - ('\u{98f}', '\u{990}'), ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', - '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), ('\u{9bd}', '\u{9bd}'), ('\u{9ce}', '\u{9ce}'), - ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e1}'), ('\u{9f0}', '\u{9f1}'), ('\u{9fc}', - '\u{9fc}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'), ('\u{a13}', '\u{a28}'), - ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', '\u{a36}'), ('\u{a38}', - '\u{a39}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}', '\u{a5e}'), ('\u{a72}', '\u{a74}'), - ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}', - '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abd}', '\u{abd}'), - ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae1}'), ('\u{af9}', '\u{af9}'), ('\u{b05}', - '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', 
'\u{b28}'), ('\u{b2a}', '\u{b30}'), - ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3d}', '\u{b3d}'), ('\u{b5c}', - '\u{b5d}'), ('\u{b5f}', '\u{b61}'), ('\u{b71}', '\u{b71}'), ('\u{b83}', '\u{b83}'), - ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', '\u{b95}'), ('\u{b99}', - '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), ('\u{ba3}', '\u{ba4}'), - ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bd0}', '\u{bd0}'), ('\u{c05}', - '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), - ('\u{c3d}', '\u{c3d}'), ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c61}'), ('\u{c80}', - '\u{c80}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'), - ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbd}', '\u{cbd}'), ('\u{cde}', - '\u{cde}'), ('\u{ce0}', '\u{ce1}'), ('\u{cf1}', '\u{cf2}'), ('\u{d05}', '\u{d0c}'), - ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'), ('\u{d3d}', '\u{d3d}'), ('\u{d4e}', - '\u{d4e}'), ('\u{d54}', '\u{d56}'), ('\u{d5f}', '\u{d61}'), ('\u{d7a}', '\u{d7f}'), - ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}', - '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{e01}', '\u{e30}'), ('\u{e32}', '\u{e32}'), - ('\u{e40}', '\u{e46}'), ('\u{e81}', '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e86}', - '\u{e8a}'), ('\u{e8c}', '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{eb0}'), - ('\u{eb2}', '\u{eb2}'), ('\u{ebd}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', - '\u{ec6}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f40}', '\u{f47}'), - ('\u{f49}', '\u{f6c}'), ('\u{f88}', '\u{f8c}'), ('\u{1000}', '\u{102a}'), ('\u{103f}', - '\u{103f}'), ('\u{1050}', '\u{1055}'), ('\u{105a}', '\u{105d}'), ('\u{1061}', '\u{1061}'), - ('\u{1065}', '\u{1066}'), ('\u{106e}', '\u{1070}'), ('\u{1075}', '\u{1081}'), ('\u{108e}', - '\u{108e}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', '\u{10cd}'), - ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), ('\u{1250}', - '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', '\u{1288}'), - ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'), ('\u{12b8}', - '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', '\u{12d6}'), - ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), ('\u{1380}', - '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}', '\u{166c}'), - ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'), ('\u{16ee}', - '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1711}'), ('\u{1720}', '\u{1731}'), - ('\u{1740}', '\u{1751}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'), ('\u{1780}', - '\u{17b3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), ('\u{1820}', '\u{1878}'), - ('\u{1880}', '\u{18a8}'), ('\u{18aa}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), ('\u{1900}', - '\u{191e}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', '\u{19ab}'), - ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a16}'), ('\u{1a20}', '\u{1a54}'), ('\u{1aa7}', - '\u{1aa7}'), ('\u{1b05}', '\u{1b33}'), ('\u{1b45}', '\u{1b4b}'), ('\u{1b83}', '\u{1ba0}'), - ('\u{1bae}', '\u{1baf}'), ('\u{1bba}', '\u{1be5}'), ('\u{1c00}', '\u{1c23}'), ('\u{1c4d}', - '\u{1c4f}'), ('\u{1c5a}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), ('\u{1c90}', '\u{1cba}'), - ('\u{1cbd}', '\u{1cbf}'), ('\u{1ce9}', '\u{1cec}'), ('\u{1cee}', '\u{1cf3}'), ('\u{1cf5}', - '\u{1cf6}'), 
('\u{1cfa}', '\u{1cfa}'), ('\u{1d00}', '\u{1dbf}'), ('\u{1e00}', '\u{1f15}'), - ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', - '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), - ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', - '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), - ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', - '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', '\u{209c}'), - ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), ('\u{2115}', - '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'), - ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}', - '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', '\u{2c2e}'), - ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cee}'), ('\u{2cf2}', - '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), - ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}', '\u{2d96}'), ('\u{2da0}', - '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), - ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', - '\u{2dde}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'), ('\u{3031}', '\u{3035}'), - ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}', '\u{309f}'), ('\u{30a1}', - '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312f}'), ('\u{3131}', '\u{318e}'), - ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', '\u{4db5}'), ('\u{4e00}', - '\u{9fef}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), ('\u{a500}', '\u{a60c}'), - ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}', '\u{a66e}'), ('\u{a67f}', - '\u{a69d}'), ('\u{a6a0}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'), - ('\u{a78b}', '\u{a7bf}'), ('\u{a7c2}', '\u{a7c6}'), ('\u{a7f7}', '\u{a801}'), ('\u{a803}', - '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a822}'), ('\u{a840}', '\u{a873}'), - ('\u{a882}', '\u{a8b3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}', '\u{a8fb}'), ('\u{a8fd}', - '\u{a8fe}'), ('\u{a90a}', '\u{a925}'), ('\u{a930}', '\u{a946}'), ('\u{a960}', '\u{a97c}'), - ('\u{a984}', '\u{a9b2}'), ('\u{a9cf}', '\u{a9cf}'), ('\u{a9e0}', '\u{a9e4}'), ('\u{a9e6}', - '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), ('\u{aa00}', '\u{aa28}'), ('\u{aa40}', '\u{aa42}'), - ('\u{aa44}', '\u{aa4b}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aa7a}'), ('\u{aa7e}', - '\u{aaaf}'), ('\u{aab1}', '\u{aab1}'), ('\u{aab5}', '\u{aab6}'), ('\u{aab9}', '\u{aabd}'), - ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'), ('\u{aae0}', - '\u{aaea}'), ('\u{aaf2}', '\u{aaf4}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}', '\u{ab0e}'), - ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'), ('\u{ab30}', - '\u{ab5a}'), ('\u{ab5c}', '\u{ab67}'), ('\u{ab70}', '\u{abe2}'), ('\u{ac00}', '\u{d7a3}'), - ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), ('\u{fa70}', - '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', '\u{fb1d}'), - ('\u{fb1f}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}', - '\u{fb3e}'), ('\u{fb40}', 
'\u{fb41}'), ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'), - ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', - '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', '\u{fe73}'), - ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), ('\u{fe7d}', - '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff41}', '\u{ff5a}'), - ('\u{ff66}', '\u{ff9d}'), ('\u{ffa0}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', - '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', '\u{1000b}'), - ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', '\u{1003d}'), - ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', '\u{100fa}'), - ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'), ('\u{102a0}', '\u{102d0}'), - ('\u{10300}', '\u{1031f}'), ('\u{1032d}', '\u{1034a}'), ('\u{10350}', '\u{10375}'), - ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', '\u{103cf}'), - ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), ('\u{104b0}', '\u{104d3}'), - ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'), ('\u{10530}', '\u{10563}'), - ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'), ('\u{10760}', '\u{10767}'), - ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'), ('\u{1080a}', '\u{10835}'), - ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'), ('\u{1083f}', '\u{10855}'), - ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'), ('\u{108e0}', '\u{108f2}'), - ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'), ('\u{10920}', '\u{10939}'), - ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), ('\u{10a00}', '\u{10a00}'), - ('\u{10a10}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), ('\u{10a19}', '\u{10a35}'), - ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'), - ('\u{10ac9}', '\u{10ae4}'), ('\u{10b00}', '\u{10b35}'), ('\u{10b40}', '\u{10b55}'), - ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), ('\u{10c00}', '\u{10c48}'), - ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{10d00}', '\u{10d23}'), - ('\u{10f00}', '\u{10f1c}'), ('\u{10f27}', '\u{10f27}'), ('\u{10f30}', '\u{10f45}'), - ('\u{10fe0}', '\u{10ff6}'), ('\u{11003}', '\u{11037}'), ('\u{11083}', '\u{110af}'), - ('\u{110d0}', '\u{110e8}'), ('\u{11103}', '\u{11126}'), ('\u{11144}', '\u{11144}'), - ('\u{11150}', '\u{11172}'), ('\u{11176}', '\u{11176}'), ('\u{11183}', '\u{111b2}'), - ('\u{111c1}', '\u{111c4}'), ('\u{111da}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'), - ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{1122b}'), ('\u{11280}', '\u{11286}'), - ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), ('\u{1128f}', '\u{1129d}'), - ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112de}'), ('\u{11305}', '\u{1130c}'), - ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'), - ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133d}', '\u{1133d}'), - ('\u{11350}', '\u{11350}'), ('\u{1135d}', '\u{11361}'), ('\u{11400}', '\u{11434}'), - ('\u{11447}', '\u{1144a}'), ('\u{1145f}', '\u{1145f}'), ('\u{11480}', '\u{114af}'), - ('\u{114c4}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{11580}', '\u{115ae}'), - ('\u{115d8}', '\u{115db}'), ('\u{11600}', '\u{1162f}'), ('\u{11644}', '\u{11644}'), - ('\u{11680}', '\u{116aa}'), ('\u{116b8}', '\u{116b8}'), ('\u{11700}', '\u{1171a}'), - ('\u{11800}', '\u{1182b}'), ('\u{118a0}', '\u{118df}'), ('\u{118ff}', '\u{118ff}'), - 
('\u{119a0}', '\u{119a7}'), ('\u{119aa}', '\u{119d0}'), ('\u{119e1}', '\u{119e1}'), - ('\u{119e3}', '\u{119e3}'), ('\u{11a00}', '\u{11a00}'), ('\u{11a0b}', '\u{11a32}'), - ('\u{11a3a}', '\u{11a3a}'), ('\u{11a50}', '\u{11a50}'), ('\u{11a5c}', '\u{11a89}'), - ('\u{11a9d}', '\u{11a9d}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'), - ('\u{11c0a}', '\u{11c2e}'), ('\u{11c40}', '\u{11c40}'), ('\u{11c72}', '\u{11c8f}'), - ('\u{11d00}', '\u{11d06}'), ('\u{11d08}', '\u{11d09}'), ('\u{11d0b}', '\u{11d30}'), - ('\u{11d46}', '\u{11d46}'), ('\u{11d60}', '\u{11d65}'), ('\u{11d67}', '\u{11d68}'), - ('\u{11d6a}', '\u{11d89}'), ('\u{11d98}', '\u{11d98}'), ('\u{11ee0}', '\u{11ef2}'), - ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'), - ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'), - ('\u{16a40}', '\u{16a5e}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16b00}', '\u{16b2f}'), - ('\u{16b40}', '\u{16b43}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'), - ('\u{16e40}', '\u{16e7f}'), ('\u{16f00}', '\u{16f4a}'), ('\u{16f50}', '\u{16f50}'), - ('\u{16f93}', '\u{16f9f}'), ('\u{16fe0}', '\u{16fe1}'), ('\u{16fe3}', '\u{16fe3}'), - ('\u{17000}', '\u{187f7}'), ('\u{18800}', '\u{18af2}'), ('\u{1b000}', '\u{1b11e}'), - ('\u{1b150}', '\u{1b152}'), ('\u{1b164}', '\u{1b167}'), ('\u{1b170}', '\u{1b2fb}'), - ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), ('\u{1bc80}', '\u{1bc88}'), - ('\u{1bc90}', '\u{1bc99}'), ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), - ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), - ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), - ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), - ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), - ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), - ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), - ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), - ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), - ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), - ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1e100}', '\u{1e12c}'), ('\u{1e137}', '\u{1e13d}'), - ('\u{1e14e}', '\u{1e14e}'), ('\u{1e2c0}', '\u{1e2eb}'), ('\u{1e800}', '\u{1e8c4}'), - ('\u{1e900}', '\u{1e943}'), ('\u{1e94b}', '\u{1e94b}'), ('\u{1ee00}', '\u{1ee03}'), - ('\u{1ee05}', '\u{1ee1f}'), ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), - ('\u{1ee27}', '\u{1ee27}'), ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), - ('\u{1ee39}', '\u{1ee39}'), ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), - ('\u{1ee47}', '\u{1ee47}'), ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), - ('\u{1ee4d}', '\u{1ee4f}'), ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), - ('\u{1ee57}', '\u{1ee57}'), ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), - ('\u{1ee5d}', '\u{1ee5d}'), ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), - ('\u{1ee64}', '\u{1ee64}'), ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), - ('\u{1ee74}', '\u{1ee77}'), ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), - ('\u{1ee80}', '\u{1ee89}'), ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), - ('\u{1eea5}', '\u{1eea9}'), ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), - ('\u{2a700}', '\u{2b734}'), 
('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), - ('\u{2ceb0}', '\u{2ebe0}'), ('\u{2f800}', '\u{2fa1d}') + ('\u{41}', '\u{5a}'), + ('\u{61}', '\u{7a}'), + ('\u{aa}', '\u{aa}'), + ('\u{b5}', '\u{b5}'), + ('\u{ba}', '\u{ba}'), + ('\u{c0}', '\u{d6}'), + ('\u{d8}', '\u{f6}'), + ('\u{f8}', '\u{2c1}'), + ('\u{2c6}', '\u{2d1}'), + ('\u{2e0}', '\u{2e4}'), + ('\u{2ec}', '\u{2ec}'), + ('\u{2ee}', '\u{2ee}'), + ('\u{370}', '\u{374}'), + ('\u{376}', '\u{377}'), + ('\u{37b}', '\u{37d}'), + ('\u{37f}', '\u{37f}'), + ('\u{386}', '\u{386}'), + ('\u{388}', '\u{38a}'), + ('\u{38c}', '\u{38c}'), + ('\u{38e}', '\u{3a1}'), + ('\u{3a3}', '\u{3f5}'), + ('\u{3f7}', '\u{481}'), + ('\u{48a}', '\u{52f}'), + ('\u{531}', '\u{556}'), + ('\u{559}', '\u{559}'), + ('\u{560}', '\u{588}'), + ('\u{5d0}', '\u{5ea}'), + ('\u{5ef}', '\u{5f2}'), + ('\u{620}', '\u{64a}'), + ('\u{66e}', '\u{66f}'), + ('\u{671}', '\u{6d3}'), + ('\u{6d5}', '\u{6d5}'), + ('\u{6e5}', '\u{6e6}'), + ('\u{6ee}', '\u{6ef}'), + ('\u{6fa}', '\u{6fc}'), + ('\u{6ff}', '\u{6ff}'), + ('\u{710}', '\u{710}'), + ('\u{712}', '\u{72f}'), + ('\u{74d}', '\u{7a5}'), + ('\u{7b1}', '\u{7b1}'), + ('\u{7ca}', '\u{7ea}'), + ('\u{7f4}', '\u{7f5}'), + ('\u{7fa}', '\u{7fa}'), + ('\u{800}', '\u{815}'), + ('\u{81a}', '\u{81a}'), + ('\u{824}', '\u{824}'), + ('\u{828}', '\u{828}'), + ('\u{840}', '\u{858}'), + ('\u{860}', '\u{86a}'), + ('\u{8a0}', '\u{8b4}'), + ('\u{8b6}', '\u{8c7}'), + ('\u{904}', '\u{939}'), + ('\u{93d}', '\u{93d}'), + ('\u{950}', '\u{950}'), + ('\u{958}', '\u{961}'), + ('\u{971}', '\u{980}'), + ('\u{985}', '\u{98c}'), + ('\u{98f}', '\u{990}'), + ('\u{993}', '\u{9a8}'), + ('\u{9aa}', '\u{9b0}'), + ('\u{9b2}', '\u{9b2}'), + ('\u{9b6}', '\u{9b9}'), + ('\u{9bd}', '\u{9bd}'), + ('\u{9ce}', '\u{9ce}'), + ('\u{9dc}', '\u{9dd}'), + ('\u{9df}', '\u{9e1}'), + ('\u{9f0}', '\u{9f1}'), + ('\u{9fc}', '\u{9fc}'), + ('\u{a05}', '\u{a0a}'), + ('\u{a0f}', '\u{a10}'), + ('\u{a13}', '\u{a28}'), + ('\u{a2a}', '\u{a30}'), + ('\u{a32}', '\u{a33}'), + ('\u{a35}', '\u{a36}'), + ('\u{a38}', '\u{a39}'), + ('\u{a59}', '\u{a5c}'), + ('\u{a5e}', '\u{a5e}'), + ('\u{a72}', '\u{a74}'), + ('\u{a85}', '\u{a8d}'), + ('\u{a8f}', '\u{a91}'), + ('\u{a93}', '\u{aa8}'), + ('\u{aaa}', '\u{ab0}'), + ('\u{ab2}', '\u{ab3}'), + ('\u{ab5}', '\u{ab9}'), + ('\u{abd}', '\u{abd}'), + ('\u{ad0}', '\u{ad0}'), + ('\u{ae0}', '\u{ae1}'), + ('\u{af9}', '\u{af9}'), + ('\u{b05}', '\u{b0c}'), + ('\u{b0f}', '\u{b10}'), + ('\u{b13}', '\u{b28}'), + ('\u{b2a}', '\u{b30}'), + ('\u{b32}', '\u{b33}'), + ('\u{b35}', '\u{b39}'), + ('\u{b3d}', '\u{b3d}'), + ('\u{b5c}', '\u{b5d}'), + ('\u{b5f}', '\u{b61}'), + ('\u{b71}', '\u{b71}'), + ('\u{b83}', '\u{b83}'), + ('\u{b85}', '\u{b8a}'), + ('\u{b8e}', '\u{b90}'), + ('\u{b92}', '\u{b95}'), + ('\u{b99}', '\u{b9a}'), + ('\u{b9c}', '\u{b9c}'), + ('\u{b9e}', '\u{b9f}'), + ('\u{ba3}', '\u{ba4}'), + ('\u{ba8}', '\u{baa}'), + ('\u{bae}', '\u{bb9}'), + ('\u{bd0}', '\u{bd0}'), + ('\u{c05}', '\u{c0c}'), + ('\u{c0e}', '\u{c10}'), + ('\u{c12}', '\u{c28}'), + ('\u{c2a}', '\u{c39}'), + ('\u{c3d}', '\u{c3d}'), + ('\u{c58}', '\u{c5a}'), + ('\u{c60}', '\u{c61}'), + ('\u{c80}', '\u{c80}'), + ('\u{c85}', '\u{c8c}'), + ('\u{c8e}', '\u{c90}'), + ('\u{c92}', '\u{ca8}'), + ('\u{caa}', '\u{cb3}'), + ('\u{cb5}', '\u{cb9}'), + ('\u{cbd}', '\u{cbd}'), + ('\u{cde}', '\u{cde}'), + ('\u{ce0}', '\u{ce1}'), + ('\u{cf1}', '\u{cf2}'), + ('\u{d04}', '\u{d0c}'), + ('\u{d0e}', '\u{d10}'), + ('\u{d12}', '\u{d3a}'), + ('\u{d3d}', '\u{d3d}'), + ('\u{d4e}', '\u{d4e}'), + ('\u{d54}', '\u{d56}'), + ('\u{d5f}', 
'\u{d61}'), + ('\u{d7a}', '\u{d7f}'), + ('\u{d85}', '\u{d96}'), + ('\u{d9a}', '\u{db1}'), + ('\u{db3}', '\u{dbb}'), + ('\u{dbd}', '\u{dbd}'), + ('\u{dc0}', '\u{dc6}'), + ('\u{e01}', '\u{e30}'), + ('\u{e32}', '\u{e32}'), + ('\u{e40}', '\u{e46}'), + ('\u{e81}', '\u{e82}'), + ('\u{e84}', '\u{e84}'), + ('\u{e86}', '\u{e8a}'), + ('\u{e8c}', '\u{ea3}'), + ('\u{ea5}', '\u{ea5}'), + ('\u{ea7}', '\u{eb0}'), + ('\u{eb2}', '\u{eb2}'), + ('\u{ebd}', '\u{ebd}'), + ('\u{ec0}', '\u{ec4}'), + ('\u{ec6}', '\u{ec6}'), + ('\u{edc}', '\u{edf}'), + ('\u{f00}', '\u{f00}'), + ('\u{f40}', '\u{f47}'), + ('\u{f49}', '\u{f6c}'), + ('\u{f88}', '\u{f8c}'), + ('\u{1000}', '\u{102a}'), + ('\u{103f}', '\u{103f}'), + ('\u{1050}', '\u{1055}'), + ('\u{105a}', '\u{105d}'), + ('\u{1061}', '\u{1061}'), + ('\u{1065}', '\u{1066}'), + ('\u{106e}', '\u{1070}'), + ('\u{1075}', '\u{1081}'), + ('\u{108e}', '\u{108e}'), + ('\u{10a0}', '\u{10c5}'), + ('\u{10c7}', '\u{10c7}'), + ('\u{10cd}', '\u{10cd}'), + ('\u{10d0}', '\u{10fa}'), + ('\u{10fc}', '\u{1248}'), + ('\u{124a}', '\u{124d}'), + ('\u{1250}', '\u{1256}'), + ('\u{1258}', '\u{1258}'), + ('\u{125a}', '\u{125d}'), + ('\u{1260}', '\u{1288}'), + ('\u{128a}', '\u{128d}'), + ('\u{1290}', '\u{12b0}'), + ('\u{12b2}', '\u{12b5}'), + ('\u{12b8}', '\u{12be}'), + ('\u{12c0}', '\u{12c0}'), + ('\u{12c2}', '\u{12c5}'), + ('\u{12c8}', '\u{12d6}'), + ('\u{12d8}', '\u{1310}'), + ('\u{1312}', '\u{1315}'), + ('\u{1318}', '\u{135a}'), + ('\u{1380}', '\u{138f}'), + ('\u{13a0}', '\u{13f5}'), + ('\u{13f8}', '\u{13fd}'), + ('\u{1401}', '\u{166c}'), + ('\u{166f}', '\u{167f}'), + ('\u{1681}', '\u{169a}'), + ('\u{16a0}', '\u{16ea}'), + ('\u{16ee}', '\u{16f8}'), + ('\u{1700}', '\u{170c}'), + ('\u{170e}', '\u{1711}'), + ('\u{1720}', '\u{1731}'), + ('\u{1740}', '\u{1751}'), + ('\u{1760}', '\u{176c}'), + ('\u{176e}', '\u{1770}'), + ('\u{1780}', '\u{17b3}'), + ('\u{17d7}', '\u{17d7}'), + ('\u{17dc}', '\u{17dc}'), + ('\u{1820}', '\u{1878}'), + ('\u{1880}', '\u{18a8}'), + ('\u{18aa}', '\u{18aa}'), + ('\u{18b0}', '\u{18f5}'), + ('\u{1900}', '\u{191e}'), + ('\u{1950}', '\u{196d}'), + ('\u{1970}', '\u{1974}'), + ('\u{1980}', '\u{19ab}'), + ('\u{19b0}', '\u{19c9}'), + ('\u{1a00}', '\u{1a16}'), + ('\u{1a20}', '\u{1a54}'), + ('\u{1aa7}', '\u{1aa7}'), + ('\u{1b05}', '\u{1b33}'), + ('\u{1b45}', '\u{1b4b}'), + ('\u{1b83}', '\u{1ba0}'), + ('\u{1bae}', '\u{1baf}'), + ('\u{1bba}', '\u{1be5}'), + ('\u{1c00}', '\u{1c23}'), + ('\u{1c4d}', '\u{1c4f}'), + ('\u{1c5a}', '\u{1c7d}'), + ('\u{1c80}', '\u{1c88}'), + ('\u{1c90}', '\u{1cba}'), + ('\u{1cbd}', '\u{1cbf}'), + ('\u{1ce9}', '\u{1cec}'), + ('\u{1cee}', '\u{1cf3}'), + ('\u{1cf5}', '\u{1cf6}'), + ('\u{1cfa}', '\u{1cfa}'), + ('\u{1d00}', '\u{1dbf}'), + ('\u{1e00}', '\u{1f15}'), + ('\u{1f18}', '\u{1f1d}'), + ('\u{1f20}', '\u{1f45}'), + ('\u{1f48}', '\u{1f4d}'), + ('\u{1f50}', '\u{1f57}'), + ('\u{1f59}', '\u{1f59}'), + ('\u{1f5b}', '\u{1f5b}'), + ('\u{1f5d}', '\u{1f5d}'), + ('\u{1f5f}', '\u{1f7d}'), + ('\u{1f80}', '\u{1fb4}'), + ('\u{1fb6}', '\u{1fbc}'), + ('\u{1fbe}', '\u{1fbe}'), + ('\u{1fc2}', '\u{1fc4}'), + ('\u{1fc6}', '\u{1fcc}'), + ('\u{1fd0}', '\u{1fd3}'), + ('\u{1fd6}', '\u{1fdb}'), + ('\u{1fe0}', '\u{1fec}'), + ('\u{1ff2}', '\u{1ff4}'), + ('\u{1ff6}', '\u{1ffc}'), + ('\u{2071}', '\u{2071}'), + ('\u{207f}', '\u{207f}'), + ('\u{2090}', '\u{209c}'), + ('\u{2102}', '\u{2102}'), + ('\u{2107}', '\u{2107}'), + ('\u{210a}', '\u{2113}'), + ('\u{2115}', '\u{2115}'), + ('\u{2118}', '\u{211d}'), + ('\u{2124}', '\u{2124}'), + ('\u{2126}', '\u{2126}'), + ('\u{2128}', '\u{2128}'), + 
('\u{212a}', '\u{2139}'), + ('\u{213c}', '\u{213f}'), + ('\u{2145}', '\u{2149}'), + ('\u{214e}', '\u{214e}'), + ('\u{2160}', '\u{2188}'), + ('\u{2c00}', '\u{2c2e}'), + ('\u{2c30}', '\u{2c5e}'), + ('\u{2c60}', '\u{2ce4}'), + ('\u{2ceb}', '\u{2cee}'), + ('\u{2cf2}', '\u{2cf3}'), + ('\u{2d00}', '\u{2d25}'), + ('\u{2d27}', '\u{2d27}'), + ('\u{2d2d}', '\u{2d2d}'), + ('\u{2d30}', '\u{2d67}'), + ('\u{2d6f}', '\u{2d6f}'), + ('\u{2d80}', '\u{2d96}'), + ('\u{2da0}', '\u{2da6}'), + ('\u{2da8}', '\u{2dae}'), + ('\u{2db0}', '\u{2db6}'), + ('\u{2db8}', '\u{2dbe}'), + ('\u{2dc0}', '\u{2dc6}'), + ('\u{2dc8}', '\u{2dce}'), + ('\u{2dd0}', '\u{2dd6}'), + ('\u{2dd8}', '\u{2dde}'), + ('\u{3005}', '\u{3007}'), + ('\u{3021}', '\u{3029}'), + ('\u{3031}', '\u{3035}'), + ('\u{3038}', '\u{303c}'), + ('\u{3041}', '\u{3096}'), + ('\u{309d}', '\u{309f}'), + ('\u{30a1}', '\u{30fa}'), + ('\u{30fc}', '\u{30ff}'), + ('\u{3105}', '\u{312f}'), + ('\u{3131}', '\u{318e}'), + ('\u{31a0}', '\u{31bf}'), + ('\u{31f0}', '\u{31ff}'), + ('\u{3400}', '\u{4dbf}'), + ('\u{4e00}', '\u{9ffc}'), + ('\u{a000}', '\u{a48c}'), + ('\u{a4d0}', '\u{a4fd}'), + ('\u{a500}', '\u{a60c}'), + ('\u{a610}', '\u{a61f}'), + ('\u{a62a}', '\u{a62b}'), + ('\u{a640}', '\u{a66e}'), + ('\u{a67f}', '\u{a69d}'), + ('\u{a6a0}', '\u{a6ef}'), + ('\u{a717}', '\u{a71f}'), + ('\u{a722}', '\u{a788}'), + ('\u{a78b}', '\u{a7bf}'), + ('\u{a7c2}', '\u{a7ca}'), + ('\u{a7f5}', '\u{a801}'), + ('\u{a803}', '\u{a805}'), + ('\u{a807}', '\u{a80a}'), + ('\u{a80c}', '\u{a822}'), + ('\u{a840}', '\u{a873}'), + ('\u{a882}', '\u{a8b3}'), + ('\u{a8f2}', '\u{a8f7}'), + ('\u{a8fb}', '\u{a8fb}'), + ('\u{a8fd}', '\u{a8fe}'), + ('\u{a90a}', '\u{a925}'), + ('\u{a930}', '\u{a946}'), + ('\u{a960}', '\u{a97c}'), + ('\u{a984}', '\u{a9b2}'), + ('\u{a9cf}', '\u{a9cf}'), + ('\u{a9e0}', '\u{a9e4}'), + ('\u{a9e6}', '\u{a9ef}'), + ('\u{a9fa}', '\u{a9fe}'), + ('\u{aa00}', '\u{aa28}'), + ('\u{aa40}', '\u{aa42}'), + ('\u{aa44}', '\u{aa4b}'), + ('\u{aa60}', '\u{aa76}'), + ('\u{aa7a}', '\u{aa7a}'), + ('\u{aa7e}', '\u{aaaf}'), + ('\u{aab1}', '\u{aab1}'), + ('\u{aab5}', '\u{aab6}'), + ('\u{aab9}', '\u{aabd}'), + ('\u{aac0}', '\u{aac0}'), + ('\u{aac2}', '\u{aac2}'), + ('\u{aadb}', '\u{aadd}'), + ('\u{aae0}', '\u{aaea}'), + ('\u{aaf2}', '\u{aaf4}'), + ('\u{ab01}', '\u{ab06}'), + ('\u{ab09}', '\u{ab0e}'), + ('\u{ab11}', '\u{ab16}'), + ('\u{ab20}', '\u{ab26}'), + ('\u{ab28}', '\u{ab2e}'), + ('\u{ab30}', '\u{ab5a}'), + ('\u{ab5c}', '\u{ab69}'), + ('\u{ab70}', '\u{abe2}'), + ('\u{ac00}', '\u{d7a3}'), + ('\u{d7b0}', '\u{d7c6}'), + ('\u{d7cb}', '\u{d7fb}'), + ('\u{f900}', '\u{fa6d}'), + ('\u{fa70}', '\u{fad9}'), + ('\u{fb00}', '\u{fb06}'), + ('\u{fb13}', '\u{fb17}'), + ('\u{fb1d}', '\u{fb1d}'), + ('\u{fb1f}', '\u{fb28}'), + ('\u{fb2a}', '\u{fb36}'), + ('\u{fb38}', '\u{fb3c}'), + ('\u{fb3e}', '\u{fb3e}'), + ('\u{fb40}', '\u{fb41}'), + ('\u{fb43}', '\u{fb44}'), + ('\u{fb46}', '\u{fbb1}'), + ('\u{fbd3}', '\u{fc5d}'), + ('\u{fc64}', '\u{fd3d}'), + ('\u{fd50}', '\u{fd8f}'), + ('\u{fd92}', '\u{fdc7}'), + ('\u{fdf0}', '\u{fdf9}'), + ('\u{fe71}', '\u{fe71}'), + ('\u{fe73}', '\u{fe73}'), + ('\u{fe77}', '\u{fe77}'), + ('\u{fe79}', '\u{fe79}'), + ('\u{fe7b}', '\u{fe7b}'), + ('\u{fe7d}', '\u{fe7d}'), + ('\u{fe7f}', '\u{fefc}'), + ('\u{ff21}', '\u{ff3a}'), + ('\u{ff41}', '\u{ff5a}'), + ('\u{ff66}', '\u{ff9d}'), + ('\u{ffa0}', '\u{ffbe}'), + ('\u{ffc2}', '\u{ffc7}'), + ('\u{ffca}', '\u{ffcf}'), + ('\u{ffd2}', '\u{ffd7}'), + ('\u{ffda}', '\u{ffdc}'), + ('\u{10000}', '\u{1000b}'), + ('\u{1000d}', '\u{10026}'), + ('\u{10028}', 
'\u{1003a}'), + ('\u{1003c}', '\u{1003d}'), + ('\u{1003f}', '\u{1004d}'), + ('\u{10050}', '\u{1005d}'), + ('\u{10080}', '\u{100fa}'), + ('\u{10140}', '\u{10174}'), + ('\u{10280}', '\u{1029c}'), + ('\u{102a0}', '\u{102d0}'), + ('\u{10300}', '\u{1031f}'), + ('\u{1032d}', '\u{1034a}'), + ('\u{10350}', '\u{10375}'), + ('\u{10380}', '\u{1039d}'), + ('\u{103a0}', '\u{103c3}'), + ('\u{103c8}', '\u{103cf}'), + ('\u{103d1}', '\u{103d5}'), + ('\u{10400}', '\u{1049d}'), + ('\u{104b0}', '\u{104d3}'), + ('\u{104d8}', '\u{104fb}'), + ('\u{10500}', '\u{10527}'), + ('\u{10530}', '\u{10563}'), + ('\u{10600}', '\u{10736}'), + ('\u{10740}', '\u{10755}'), + ('\u{10760}', '\u{10767}'), + ('\u{10800}', '\u{10805}'), + ('\u{10808}', '\u{10808}'), + ('\u{1080a}', '\u{10835}'), + ('\u{10837}', '\u{10838}'), + ('\u{1083c}', '\u{1083c}'), + ('\u{1083f}', '\u{10855}'), + ('\u{10860}', '\u{10876}'), + ('\u{10880}', '\u{1089e}'), + ('\u{108e0}', '\u{108f2}'), + ('\u{108f4}', '\u{108f5}'), + ('\u{10900}', '\u{10915}'), + ('\u{10920}', '\u{10939}'), + ('\u{10980}', '\u{109b7}'), + ('\u{109be}', '\u{109bf}'), + ('\u{10a00}', '\u{10a00}'), + ('\u{10a10}', '\u{10a13}'), + ('\u{10a15}', '\u{10a17}'), + ('\u{10a19}', '\u{10a35}'), + ('\u{10a60}', '\u{10a7c}'), + ('\u{10a80}', '\u{10a9c}'), + ('\u{10ac0}', '\u{10ac7}'), + ('\u{10ac9}', '\u{10ae4}'), + ('\u{10b00}', '\u{10b35}'), + ('\u{10b40}', '\u{10b55}'), + ('\u{10b60}', '\u{10b72}'), + ('\u{10b80}', '\u{10b91}'), + ('\u{10c00}', '\u{10c48}'), + ('\u{10c80}', '\u{10cb2}'), + ('\u{10cc0}', '\u{10cf2}'), + ('\u{10d00}', '\u{10d23}'), + ('\u{10e80}', '\u{10ea9}'), + ('\u{10eb0}', '\u{10eb1}'), + ('\u{10f00}', '\u{10f1c}'), + ('\u{10f27}', '\u{10f27}'), + ('\u{10f30}', '\u{10f45}'), + ('\u{10fb0}', '\u{10fc4}'), + ('\u{10fe0}', '\u{10ff6}'), + ('\u{11003}', '\u{11037}'), + ('\u{11083}', '\u{110af}'), + ('\u{110d0}', '\u{110e8}'), + ('\u{11103}', '\u{11126}'), + ('\u{11144}', '\u{11144}'), + ('\u{11147}', '\u{11147}'), + ('\u{11150}', '\u{11172}'), + ('\u{11176}', '\u{11176}'), + ('\u{11183}', '\u{111b2}'), + ('\u{111c1}', '\u{111c4}'), + ('\u{111da}', '\u{111da}'), + ('\u{111dc}', '\u{111dc}'), + ('\u{11200}', '\u{11211}'), + ('\u{11213}', '\u{1122b}'), + ('\u{11280}', '\u{11286}'), + ('\u{11288}', '\u{11288}'), + ('\u{1128a}', '\u{1128d}'), + ('\u{1128f}', '\u{1129d}'), + ('\u{1129f}', '\u{112a8}'), + ('\u{112b0}', '\u{112de}'), + ('\u{11305}', '\u{1130c}'), + ('\u{1130f}', '\u{11310}'), + ('\u{11313}', '\u{11328}'), + ('\u{1132a}', '\u{11330}'), + ('\u{11332}', '\u{11333}'), + ('\u{11335}', '\u{11339}'), + ('\u{1133d}', '\u{1133d}'), + ('\u{11350}', '\u{11350}'), + ('\u{1135d}', '\u{11361}'), + ('\u{11400}', '\u{11434}'), + ('\u{11447}', '\u{1144a}'), + ('\u{1145f}', '\u{11461}'), + ('\u{11480}', '\u{114af}'), + ('\u{114c4}', '\u{114c5}'), + ('\u{114c7}', '\u{114c7}'), + ('\u{11580}', '\u{115ae}'), + ('\u{115d8}', '\u{115db}'), + ('\u{11600}', '\u{1162f}'), + ('\u{11644}', '\u{11644}'), + ('\u{11680}', '\u{116aa}'), + ('\u{116b8}', '\u{116b8}'), + ('\u{11700}', '\u{1171a}'), + ('\u{11800}', '\u{1182b}'), + ('\u{118a0}', '\u{118df}'), + ('\u{118ff}', '\u{11906}'), + ('\u{11909}', '\u{11909}'), + ('\u{1190c}', '\u{11913}'), + ('\u{11915}', '\u{11916}'), + ('\u{11918}', '\u{1192f}'), + ('\u{1193f}', '\u{1193f}'), + ('\u{11941}', '\u{11941}'), + ('\u{119a0}', '\u{119a7}'), + ('\u{119aa}', '\u{119d0}'), + ('\u{119e1}', '\u{119e1}'), + ('\u{119e3}', '\u{119e3}'), + ('\u{11a00}', '\u{11a00}'), + ('\u{11a0b}', '\u{11a32}'), + ('\u{11a3a}', '\u{11a3a}'), + ('\u{11a50}', '\u{11a50}'), 
+ ('\u{11a5c}', '\u{11a89}'), + ('\u{11a9d}', '\u{11a9d}'), + ('\u{11ac0}', '\u{11af8}'), + ('\u{11c00}', '\u{11c08}'), + ('\u{11c0a}', '\u{11c2e}'), + ('\u{11c40}', '\u{11c40}'), + ('\u{11c72}', '\u{11c8f}'), + ('\u{11d00}', '\u{11d06}'), + ('\u{11d08}', '\u{11d09}'), + ('\u{11d0b}', '\u{11d30}'), + ('\u{11d46}', '\u{11d46}'), + ('\u{11d60}', '\u{11d65}'), + ('\u{11d67}', '\u{11d68}'), + ('\u{11d6a}', '\u{11d89}'), + ('\u{11d98}', '\u{11d98}'), + ('\u{11ee0}', '\u{11ef2}'), + ('\u{11fb0}', '\u{11fb0}'), + ('\u{12000}', '\u{12399}'), + ('\u{12400}', '\u{1246e}'), + ('\u{12480}', '\u{12543}'), + ('\u{13000}', '\u{1342e}'), + ('\u{14400}', '\u{14646}'), + ('\u{16800}', '\u{16a38}'), + ('\u{16a40}', '\u{16a5e}'), + ('\u{16ad0}', '\u{16aed}'), + ('\u{16b00}', '\u{16b2f}'), + ('\u{16b40}', '\u{16b43}'), + ('\u{16b63}', '\u{16b77}'), + ('\u{16b7d}', '\u{16b8f}'), + ('\u{16e40}', '\u{16e7f}'), + ('\u{16f00}', '\u{16f4a}'), + ('\u{16f50}', '\u{16f50}'), + ('\u{16f93}', '\u{16f9f}'), + ('\u{16fe0}', '\u{16fe1}'), + ('\u{16fe3}', '\u{16fe3}'), + ('\u{17000}', '\u{187f7}'), + ('\u{18800}', '\u{18cd5}'), + ('\u{18d00}', '\u{18d08}'), + ('\u{1b000}', '\u{1b11e}'), + ('\u{1b150}', '\u{1b152}'), + ('\u{1b164}', '\u{1b167}'), + ('\u{1b170}', '\u{1b2fb}'), + ('\u{1bc00}', '\u{1bc6a}'), + ('\u{1bc70}', '\u{1bc7c}'), + ('\u{1bc80}', '\u{1bc88}'), + ('\u{1bc90}', '\u{1bc99}'), + ('\u{1d400}', '\u{1d454}'), + ('\u{1d456}', '\u{1d49c}'), + ('\u{1d49e}', '\u{1d49f}'), + ('\u{1d4a2}', '\u{1d4a2}'), + ('\u{1d4a5}', '\u{1d4a6}'), + ('\u{1d4a9}', '\u{1d4ac}'), + ('\u{1d4ae}', '\u{1d4b9}'), + ('\u{1d4bb}', '\u{1d4bb}'), + ('\u{1d4bd}', '\u{1d4c3}'), + ('\u{1d4c5}', '\u{1d505}'), + ('\u{1d507}', '\u{1d50a}'), + ('\u{1d50d}', '\u{1d514}'), + ('\u{1d516}', '\u{1d51c}'), + ('\u{1d51e}', '\u{1d539}'), + ('\u{1d53b}', '\u{1d53e}'), + ('\u{1d540}', '\u{1d544}'), + ('\u{1d546}', '\u{1d546}'), + ('\u{1d54a}', '\u{1d550}'), + ('\u{1d552}', '\u{1d6a5}'), + ('\u{1d6a8}', '\u{1d6c0}'), + ('\u{1d6c2}', '\u{1d6da}'), + ('\u{1d6dc}', '\u{1d6fa}'), + ('\u{1d6fc}', '\u{1d714}'), + ('\u{1d716}', '\u{1d734}'), + ('\u{1d736}', '\u{1d74e}'), + ('\u{1d750}', '\u{1d76e}'), + ('\u{1d770}', '\u{1d788}'), + ('\u{1d78a}', '\u{1d7a8}'), + ('\u{1d7aa}', '\u{1d7c2}'), + ('\u{1d7c4}', '\u{1d7cb}'), + ('\u{1e100}', '\u{1e12c}'), + ('\u{1e137}', '\u{1e13d}'), + ('\u{1e14e}', '\u{1e14e}'), + ('\u{1e2c0}', '\u{1e2eb}'), + ('\u{1e800}', '\u{1e8c4}'), + ('\u{1e900}', '\u{1e943}'), + ('\u{1e94b}', '\u{1e94b}'), + ('\u{1ee00}', '\u{1ee03}'), + ('\u{1ee05}', '\u{1ee1f}'), + ('\u{1ee21}', '\u{1ee22}'), + ('\u{1ee24}', '\u{1ee24}'), + ('\u{1ee27}', '\u{1ee27}'), + ('\u{1ee29}', '\u{1ee32}'), + ('\u{1ee34}', '\u{1ee37}'), + ('\u{1ee39}', '\u{1ee39}'), + ('\u{1ee3b}', '\u{1ee3b}'), + ('\u{1ee42}', '\u{1ee42}'), + ('\u{1ee47}', '\u{1ee47}'), + ('\u{1ee49}', '\u{1ee49}'), + ('\u{1ee4b}', '\u{1ee4b}'), + ('\u{1ee4d}', '\u{1ee4f}'), + ('\u{1ee51}', '\u{1ee52}'), + ('\u{1ee54}', '\u{1ee54}'), + ('\u{1ee57}', '\u{1ee57}'), + ('\u{1ee59}', '\u{1ee59}'), + ('\u{1ee5b}', '\u{1ee5b}'), + ('\u{1ee5d}', '\u{1ee5d}'), + ('\u{1ee5f}', '\u{1ee5f}'), + ('\u{1ee61}', '\u{1ee62}'), + ('\u{1ee64}', '\u{1ee64}'), + ('\u{1ee67}', '\u{1ee6a}'), + ('\u{1ee6c}', '\u{1ee72}'), + ('\u{1ee74}', '\u{1ee77}'), + ('\u{1ee79}', '\u{1ee7c}'), + ('\u{1ee7e}', '\u{1ee7e}'), + ('\u{1ee80}', '\u{1ee89}'), + ('\u{1ee8b}', '\u{1ee9b}'), + ('\u{1eea1}', '\u{1eea3}'), + ('\u{1eea5}', '\u{1eea9}'), + ('\u{1eeab}', '\u{1eebb}'), + ('\u{20000}', '\u{2a6dd}'), + ('\u{2a700}', '\u{2b734}'), + 
('\u{2b740}', '\u{2b81d}'), + ('\u{2b820}', '\u{2cea1}'), + ('\u{2ceb0}', '\u{2ebe0}'), + ('\u{2f800}', '\u{2fa1d}'), + ('\u{30000}', '\u{3134a}'), ]; pub fn XID_Start(c: char) -> bool { super::bsearch_range_table(c, XID_Start_table) } - } - diff -Nru cargo-0.44.1/vendor/unicode-xid/src/tests.rs cargo-0.47.0/vendor/unicode-xid/src/tests.rs --- cargo-0.44.1/vendor/unicode-xid/src/tests.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/src/tests.rs 2020-10-01 21:38:28.000000000 +0000 @@ -11,9 +11,12 @@ #[cfg(feature = "bench")] use std::iter; #[cfg(feature = "bench")] +use std::prelude::v1::*; +#[cfg(feature = "bench")] use test::Bencher; + #[cfg(feature = "bench")] -use std::prelude::v1::*; +use UnicodeXID; #[cfg(feature = "bench")] #[bench] @@ -21,9 +24,7 @@ let string = iter::repeat('a').take(4096).collect::<String>(); b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(super::UnicodeXID::is_xid_start) - }); + b.iter(|| string.chars().all(super::UnicodeXID::is_xid_start)); } #[cfg(feature = "bench")] @@ -32,9 +33,7 @@ let string = iter::repeat('a').take(4096).collect::<String>(); b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(char::is_xid_start) - }); + b.iter(|| string.chars().all(char::is_xid_start)); } #[cfg(feature = "bench")] @@ -43,9 +42,7 @@ let string = iter::repeat('a').take(4096).collect::<String>(); b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(super::UnicodeXID::is_xid_continue) - }); + b.iter(|| string.chars().all(super::UnicodeXID::is_xid_continue)); } #[cfg(feature = "bench")] @@ -54,17 +51,12 @@ let string = iter::repeat('a').take(4096).collect::<String>(); b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(char::is_xid_continue) - }); + b.iter(|| string.chars().all(char::is_xid_continue)); } #[test] fn test_is_xid_start() { - let chars = [ - 'A', 'Z', 'a', 'z', - '\u{1000d}', '\u{10026}', - ]; + let chars = ['A', 'Z', 'a', 'z', '\u{1000d}', '\u{10026}']; for ch in &chars { assert!(super::UnicodeXID::is_xid_start(*ch), "{}", ch); @@ -74,10 +66,7 @@ #[test] fn test_is_not_xid_start() { let chars = [ - '\x00', '\x01', - '0', '9', - ' ', '[', '<', '{', '(', - '\u{02c2}', '\u{ffff}', + '\x00', '\x01', '0', '9', ' ', '[', '<', '{', '(', '\u{02c2}', '\u{ffff}', ]; for ch in &chars { @@ -87,10 +76,7 @@ #[test] fn test_is_xid_continue() { - let chars = [ - '0', '9', 'A', 'Z', 'a', 'z', '_', - '\u{1000d}', '\u{10026}', - ]; + let chars = ['0', '9', 'A', 'Z', 'a', 'z', '_', '\u{1000d}', '\u{10026}']; for ch in &chars { assert!(super::UnicodeXID::is_xid_continue(*ch), "{}", ch); @@ -100,9 +86,7 @@ #[test] fn test_is_not_xid_continue() { let chars = [ - '\x00', '\x01', - ' ', '[', '<', '{', '(', - '\u{02c2}', '\u{ffff}', + '\x00', '\x01', ' ', '[', '<', '{', '(', '\u{02c2}', '\u{ffff}', ]; for &ch in &chars { diff -Nru cargo-0.44.1/vendor/unicode-xid/tests/exhaustive_tests.rs cargo-0.47.0/vendor/unicode-xid/tests/exhaustive_tests.rs --- cargo-0.44.1/vendor/unicode-xid/tests/exhaustive_tests.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/unicode-xid/tests/exhaustive_tests.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,25 @@ +extern crate unicode_xid; +use unicode_xid::UnicodeXID; +/// A `char` in Rust is a Unicode Scalar Value +/// +/// See: http://www.unicode.org/glossary/#unicode_scalar_value +fn all_valid_chars() -> impl Iterator<Item = char> { + (0u32..=0xD7FF).chain(0xE000u32..=0x10FFFF).map(|u| { + core::convert::TryFrom::try_from(u) + .expect("The selected range should be infallible if the docs match impl") })
+} + +#[test] +fn all_valid_chars_do_not_panic_for_is_xid_start() { + for c in all_valid_chars() { + let _ = UnicodeXID::is_xid_start(c); + } +} + +#[test] +fn all_valid_chars_do_not_panic_for_is_xid_continue() { + for c in all_valid_chars() { + let _ = UnicodeXID::is_xid_continue(c); + } +} diff -Nru cargo-0.44.1/vendor/vcpkg/.cargo-checksum.json cargo-0.47.0/vendor/vcpkg/.cargo-checksum.json --- cargo-0.44.1/vendor/vcpkg/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/vcpkg/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168"} \ No newline at end of file +{"files":{},"package":"6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/vcpkg/Cargo.toml cargo-0.47.0/vendor/vcpkg/Cargo.toml --- cargo-0.44.1/vendor/vcpkg/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/vcpkg/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,13 +12,13 @@ [package] name = "vcpkg" -version = "0.2.8" +version = "0.2.10" authors = ["Jim McGrath "] description = "A library to find native dependencies in a vcpkg tree at build\ntime in order to be used in Cargo build scripts.\n" documentation = "https://docs.rs/vcpkg" readme = "../README.md" -keywords = ["build-dependencies", "windows", "ffi", "win32"] -categories = ["os::windows-apis"] +keywords = ["build-dependencies", "windows", "macos", "linux"] +categories = ["development-tools::build-utils"] license = "MIT/Apache-2.0" repository = "https://github.com/mcgoo/vcpkg-rs" @@ -28,11 +28,3 @@ [dev-dependencies.tempdir] version = "0.3.7" -[badges.appveyor] -branch = "master" -repository = "mcgoo/vcpkg-rs" -service = "github" - -[badges.travis-ci] -branch = "master" -repository = "mcgoo/vcpkg-rs" diff -Nru cargo-0.44.1/vendor/vcpkg/src/lib.rs cargo-0.47.0/vendor/vcpkg/src/lib.rs --- cargo-0.44.1/vendor/vcpkg/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/vcpkg/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1,1312 +1,1387 @@ -//! A build dependency for Cargo libraries to find libraries in a -//! [Vcpkg](https://github.com/Microsoft/vcpkg) tree. From a Vcpkg package name -//! this build helper will emit cargo metadata to link it and it's dependencies -//! (excluding system libraries, which it cannot derive). -//! -//! **Note:** You must set one of `RUSTFLAGS=-Ctarget-feature=+crt-static` or -//! `VCPKGRS_DYNAMIC=1` in your environment or the vcpkg-rs helper -//! will not find any libraries. If `VCPKGRS_DYNAMIC` is set, `cargo install` will -//! generate dynamically linked binaries, in which case you will have to arrange for -//! dlls from your Vcpkg installation to be available in your path. -//! -//! The simplest possible usage looks like this :- -//! -//! ```rust,no_run -//! vcpkg::find_package("libssh2").unwrap(); -//! ``` -//! -//! The cargo metadata that is emitted can be changed like this :- -//! -//! ```rust,no_run -//! vcpkg::Config::new() -//! .emit_includes(true) -//! .find_package("zlib").unwrap(); -//! ``` -//! -//! If the search was successful all appropriate Cargo metadata will be printed -//! to stdout. -//! -//! The decision to choose static variants of libraries is driven by adding -//! `RUSTFLAGS=-Ctarget-feature=+crt-static` to the environment. This requires -//! at least Rust 1.19. -//! -//! A number of environment variables are available to globally configure which -//! libraries are selected. 
-//! -//! * `VCPKG_ROOT` - Set the directory to look in for a vcpkg installation. If -//! it is not set, vcpkg will use the user-wide installation if one has been -//! set up with `vcpkg integrate install` -//! -//! * `VCPKGRS_NO_FOO` - if set, vcpkg-rs will not attempt to find the -//! library named `foo`. -//! -//! * `VCPKGRS_DISABLE` - if set, vcpkg-rs will not attempt to find any libraries. -//! -//! * `VCPKGRS_DYNAMIC` - if set, vcpkg-rs will link to DLL builds of ports. -//! -//! There is a companion crate `vcpkg_cli` that allows testing of environment -//! and flag combinations. -//! -//! ```Batchfile -//! C:\src> vcpkg_cli probe -l static mysqlclient -//! Found library mysqlclient -//! Include paths: -//! C:\src\[..]\vcpkg\installed\x64-windows-static\include -//! Library paths: -//! C:\src\[..]\vcpkg\installed\x64-windows-static\lib -//! Cargo metadata: -//! cargo:rustc-link-search=native=C:\src\[..]\vcpkg\installed\x64-windows-static\lib -//! cargo:rustc-link-lib=static=mysqlclient -//! ``` - -// The CI will test vcpkg-rs on 1.10 because at this point rust-openssl's -// openssl-sys is backward compatible that far. (Actually, the oldest release -// crate openssl version 0.10 seems to build against is now Rust 1.24.1?) -#![allow(deprecated)] - -#[cfg(test)] -#[macro_use] -extern crate lazy_static; - -#[allow(unused_imports)] -use std::ascii::AsciiExt; - -use std::collections::BTreeMap; -use std::env; -use std::error; -use std::ffi::OsStr; -use std::fmt; -use std::fs::{self, File}; -use std::io::{BufRead, BufReader}; -use std::path::{Path, PathBuf}; - -#[derive(Default)] -pub struct Config { - /// should the cargo metadata actually be emitted - cargo_metadata: bool, - - /// should cargo:include= metadata be emitted (defaults to false) - emit_includes: bool, - - /// .lib/.a files that must be be found for probing to be considered successful - required_libs: Vec, - - /// .dlls that must be be found for probing to be considered successful - required_dlls: Vec, - - /// should DLLs be copies to OUT_DIR? - copy_dlls: bool, - - /// override VCPKG_ROOT environment variable - vcpkg_root: Option, -} - -/// Details of a package that was found -#[derive(Debug)] -pub struct Library { - /// Paths for the linker to search for static or import libraries - pub link_paths: Vec, - - /// Paths to search at runtme to find DLLs - pub dll_paths: Vec, - - /// Paths to search for - pub include_paths: Vec, - - /// cargo: metadata lines - pub cargo_metadata: Vec, - - /// libraries found are static - pub is_static: bool, - - /// DLLs found - pub found_dlls: Vec, - - /// static libs or import libs found - pub found_libs: Vec, - - /// link name of libraries found, this is useful to emit linker commands - pub found_names: Vec, - - /// ports that are providing the libraries to link to, in port link order - pub ports: Vec, -} - -enum MSVCTarget { - X86Windows, - X64Windows, - X64Linux, - X64MacOS, -} - -impl fmt::Display for MSVCTarget { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - MSVCTarget::X86Windows => write!(f, "x86-windows"), - MSVCTarget::X64Windows => write!(f, "x64-windows"), - MSVCTarget::X64Linux => write!(f, "x64-linux"), - MSVCTarget::X64MacOS => write!(f, "x64-osx"), - } - } -} - -#[derive(Debug)] // need Display? -pub enum Error { - /// Aborted because of a `VCPKGRS_NO_*` environment variable. - /// - /// Contains the name of the responsible environment variable. - DisabledByEnv(String), - - /// Aborted because a required environment variable was not set. 
- RequiredEnvMissing(String), - - /// On Windows, only MSVC ABI is supported - NotMSVC, - - /// Can't find a vcpkg tree - VcpkgNotFound(String), - - /// Library not found in vcpkg tree - LibNotFound(String), - - /// Could not understand vcpkg installation - VcpkgInstallation(String), - - #[doc(hidden)] - __Nonexhaustive, -} - -impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::DisabledByEnv(_) => "vcpkg-rs requested to be aborted", - Error::RequiredEnvMissing(_) => "a required env setting is missing", - Error::NotMSVC => "vcpkg-rs only can only find libraries for MSVC ABI 64 bit builds", - Error::VcpkgNotFound(_) => "could not find Vcpkg tree", - Error::LibNotFound(_) => "could not find library in Vcpkg tree", - Error::VcpkgInstallation(_) => "could not look up details of packages in vcpkg tree", - Error::__Nonexhaustive => panic!(), - } - } - - fn cause(&self) -> Option<&error::Error> { - match *self { - // Error::Command { ref cause, .. } => Some(cause), - _ => None, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Error::DisabledByEnv(ref name) => write!(f, "Aborted because {} is set", name), - Error::RequiredEnvMissing(ref name) => write!(f, "Aborted because {} is not set", name), - Error::NotMSVC => write!( - f, - "the vcpkg-rs Vcpkg build helper can only find libraries built for the MSVC ABI." - ), - Error::VcpkgNotFound(ref detail) => write!(f, "Could not find Vcpkg tree: {}", detail), - Error::LibNotFound(ref detail) => { - write!(f, "Could not find library in Vcpkg tree {}", detail) - } - Error::VcpkgInstallation(ref detail) => write!( - f, - "Could not look up details of packages in vcpkg tree {}", - detail - ), - Error::__Nonexhaustive => panic!(), - } - } -} - -/// Deprecated in favor of the find_package function -#[doc(hidden)] -pub fn probe_package(name: &str) -> Result { - Config::new().probe(name) -} - -/// Find the package `package` in a Vcpkg tree. -/// -/// Emits cargo metadata to link to libraries provided by the Vcpkg package/port -/// named, and any (non-system) libraries that they depend on. -/// -/// This will select the architecture and linkage based on environment -/// variables and build flags as described in the module docs. -pub fn find_package(package: &str) -> Result { - Config::new().find_package(package) -} - -fn find_vcpkg_root(cfg: &Config) -> Result { - // prefer the setting from the use if there is one - if let &Some(ref path) = &cfg.vcpkg_root { - return Ok(path.clone()); - } - - // otherwise, use the setting from the environment - if let Some(path) = env::var_os("VCPKG_ROOT") { - return Ok(PathBuf::from(path)); - } - - // see if there is a per-user vcpkg tree that has been integrated into msbuild - // using `vcpkg integrate install` - let local_app_data = try!(env::var("LOCALAPPDATA").map_err(|_| Error::VcpkgNotFound( - "Failed to read either VCPKG_ROOT or LOCALAPPDATA environment variables".to_string() - ))); // not present or can't utf8 - let vcpkg_user_targets_path = Path::new(local_app_data.as_str()) - .join("vcpkg") - .join("vcpkg.user.targets"); - - let file = try!(File::open(vcpkg_user_targets_path.clone()).map_err(|_| { - Error::VcpkgNotFound( - "No vcpkg.user.targets found. 
Set the VCPKG_ROOT environment \ - variable or run 'vcpkg integrate install'" - .to_string(), - ) - })); - let file = BufReader::new(&file); - - for line in file.lines() { - let line = try!(line.map_err(|_| Error::VcpkgNotFound(format!( - "Parsing of {} failed.", - vcpkg_user_targets_path.to_string_lossy().to_owned() - )))); - let mut split = line.split("Project=\""); - split.next(); // eat anything before Project=" - if let Some(found) = split.next() { - // " is illegal in a Windows pathname - if let Some(found) = found.split_terminator('"').next() { - let mut vcpkg_root = PathBuf::from(found); - if !(vcpkg_root.pop() && vcpkg_root.pop() && vcpkg_root.pop() && vcpkg_root.pop()) { - return Err(Error::VcpkgNotFound(format!( - "Could not find vcpkg root above {}", - found - ))); - } - return Ok(vcpkg_root); - } - } - } - - Err(Error::VcpkgNotFound(format!( - "Project location not found parsing {}.", - vcpkg_user_targets_path.to_string_lossy().to_owned() - ))) -} - -fn validate_vcpkg_root(path: &PathBuf) -> Result<(), Error> { - let mut vcpkg_root_path = path.clone(); - vcpkg_root_path.push(".vcpkg-root"); - - if vcpkg_root_path.exists() { - Ok(()) - } else { - Err(Error::VcpkgNotFound(format!( - "Could not find Vcpkg root at {}", - vcpkg_root_path.to_string_lossy() - ))) - } -} - -fn find_vcpkg_target(cfg: &Config, msvc_target: &MSVCTarget) -> Result { - let vcpkg_root = try!(find_vcpkg_root(&cfg)); - try!(validate_vcpkg_root(&vcpkg_root)); - - let (static_lib, static_appendage, lib_suffix, strip_lib_prefix) = match msvc_target { - &MSVCTarget::X64Windows | &MSVCTarget::X86Windows => { - let static_lib = env::var("CARGO_CFG_TARGET_FEATURE") - .unwrap_or(String::new()) // rustc 1.10 - .contains("crt-static"); - let static_appendage = if static_lib { "-static" } else { "" }; - (static_lib, static_appendage, "lib", false) - } - _ => (true, "", "a", true), - }; - - let mut base = vcpkg_root; - base.push("installed"); - let status_path = base.join("vcpkg"); - - let vcpkg_triple = format!("{}{}", msvc_target.to_string(), static_appendage); - base.push(&vcpkg_triple); - - let lib_path = base.join("lib"); - let bin_path = base.join("bin"); - let include_path = base.join("include"); - - Ok(VcpkgTarget { - vcpkg_triple: vcpkg_triple, - lib_path: lib_path, - bin_path: bin_path, - include_path: include_path, - is_static: static_lib, - status_path: status_path, - lib_suffix: lib_suffix.to_owned(), - strip_lib_prefix: strip_lib_prefix, - }) -} - -#[derive(Clone, Debug)] -struct Port { - // dlls if any - dlls: Vec, - - // libs (static or import) - libs: Vec, - - // ports that this port depends on - deps: Vec, -} - -fn load_port_manifest( - path: &PathBuf, - port: &str, - version: &str, - vcpkg_target: &VcpkgTarget, -) -> Result<(Vec, Vec), Error> { - let manifest_file = path.join("info").join(format!( - "{}_{}_{}.list", - port, version, vcpkg_target.vcpkg_triple - )); - - let mut dlls = Vec::new(); - let mut libs = Vec::new(); - - let f = try!( - File::open(&manifest_file).map_err(|_| Error::VcpkgInstallation(format!( - "Could not open port manifest file {}", - manifest_file.display() - ))) - ); - - let file = BufReader::new(&f); - - let dll_prefix = Path::new(&vcpkg_target.vcpkg_triple).join("bin"); - let lib_prefix = Path::new(&vcpkg_target.vcpkg_triple).join("lib"); - - for line in file.lines() { - let line = line.unwrap(); - - let file_path = Path::new(&line); - - if let Ok(dll) = file_path.strip_prefix(&dll_prefix) { - if dll.extension() == Some(OsStr::new("dll")) - && 
dll.components().collect::>().len() == 1 - { - // match "mylib.dll" but not "debug/mylib.dll" or "manual_link/mylib.dll" - - dll.to_str().map(|s| dlls.push(s.to_owned())); - } - } else if let Ok(lib) = file_path.strip_prefix(&lib_prefix) { - if lib.extension() == Some(OsStr::new(&vcpkg_target.lib_suffix)) - && lib.components().collect::>().len() == 1 - { - if let Some(lib) = vcpkg_target.link_name_for_lib(lib) { - libs.push(lib); - } - } - } - } - - Ok((dlls, libs)) -} - -// load ports from the status file or one of the incremental updates -fn load_port_file( - filename: &PathBuf, - port_info: &mut Vec>, -) -> Result<(), Error> { - let f = try!( - File::open(&filename).map_err(|e| Error::VcpkgInstallation(format!( - "Could not open status file at {}: {}", - filename.display(), - e - ))) - ); - let file = BufReader::new(&f); - let mut current: BTreeMap = BTreeMap::new(); - for line in file.lines() { - let line = line.unwrap(); - let parts = line.splitn(2, ": ").clone().collect::>(); - if parts.len() == 2 { - // a key: value line - current.insert(parts[0].trim().into(), parts[1].trim().into()); - } else if line.len() == 0 { - // end of section - port_info.push(current.clone()); - current.clear(); - } else { - // ignore all extension lines of the form - // - // Description: a package with a - // very long description - // - // the description key is not used so this is harmless but - // this will eat extension lines for any multiline key which - // could become an issue in future - } - } - - if !current.is_empty() { - port_info.push(current); - } - - Ok(()) -} - -fn load_ports(target: &VcpkgTarget) -> Result, Error> { - let mut ports: BTreeMap = BTreeMap::new(); - - let mut port_info: Vec> = Vec::new(); - - // load the main status file. It is not an error if this file does not - // exist. If the only command that has been run in a Vcpkg installation - // is a single `vcpkg install package` then there will likely be no - // status file, only incremental updates. This is the typical case when - // running in a CI environment. - let status_filename = target.status_path.join("status"); - load_port_file(&status_filename, &mut port_info).ok(); - - // load updates to the status file that have yet to be normalized - let status_update_dir = target.status_path.join("updates"); - - let paths = try!( - fs::read_dir(status_update_dir).map_err(|e| Error::VcpkgInstallation(format!( - "could not read status file updates dir: {}", - e - ))) - ); - - // get all of the paths of the update files into a Vec - let mut paths = try!(paths - .map(|rde| rde.map(|de| de.path())) // Result -> Result - .collect::, _>>() // collect into Result, io::Error> - .map_err(|e| { - Error::VcpkgInstallation(format!( - "could not read status file update filenames: {}", - e - )) - })); - - // Sort the paths and read them. 
This could be done directly from the iterator if - // read_dir() guarantees that the files will be read in alpha order but that appears - // to be unspecified as the underlying operating system calls used are unspecified - // https://doc.rust-lang.org/nightly/std/fs/fn.read_dir.html#platform-specific-behavior - paths.sort(); - for path in paths { - // println!("Name: {}", path.display()); - try!(load_port_file(&path, &mut port_info)); - } - //println!("{:#?}", port_info); - - let mut seen_names = BTreeMap::new(); - for current in &port_info { - // store them by name and arch, clobbering older details - match ( - current.get("Package"), - current.get("Architecture"), - current.get("Feature"), - ) { - (Some(pkg), Some(arch), feature) => { - seen_names.insert((pkg, arch, feature), current); - } - _ => {} - } - } - - for (&(name, arch, feature), current) in &seen_names { - if **arch == target.vcpkg_triple { - let mut deps = if let Some(deps) = current.get("Depends") { - deps.split(", ").map(|x| x.to_owned()).collect() - } else { - Vec::new() - }; - - if current - .get("Status") - .unwrap_or(&String::new()) - .ends_with(" installed") - { - match (current.get("Version"), feature) { - (Some(version), _) => { - // this failing here and bailing out causes everything to fail - let lib_info = try!(load_port_manifest( - &target.status_path, - &name, - version, - &target - )); - let port = Port { - dlls: lib_info.0, - libs: lib_info.1, - deps: deps, - }; - - ports.insert(name.to_string(), port); - } - (_, Some(_feature)) => match ports.get_mut(name) { - Some(ref mut port) => { - port.deps.append(&mut deps); - } - _ => { - println!("found a feature that had no corresponding port :-"); - println!("current {:+?}", current); - continue; - } - }, - (_, _) => { - println!("didn't know how to deal with status file entry :-"); - println!("{:+?}", current); - continue; - } - } - } - } - } - - Ok(ports) -} - -/// paths and triple for the chosen target -struct VcpkgTarget { - vcpkg_triple: String, - lib_path: PathBuf, - bin_path: PathBuf, - include_path: PathBuf, - - // directory containing the status file - status_path: PathBuf, - - is_static: bool, - lib_suffix: String, - - /// strip 'lib' from library names in linker args? - strip_lib_prefix: bool, -} - -impl VcpkgTarget { - fn link_name_for_lib(&self, filename: &std::path::Path) -> Option { - if self.strip_lib_prefix { - filename.to_str().map(|s| s.to_owned()) - // filename - // .to_str() - // .map(|s| s.trim_left_matches("lib").to_owned()) - } else { - filename.to_str().map(|s| s.to_owned()) - } - } -} - -impl Config { - pub fn new() -> Config { - Config { - cargo_metadata: true, - copy_dlls: true, - ..Default::default() - // emit_includes: false, - // required_libs: Vec::new(), - } - } - - /// Override the name of the library to look for if it differs from the package name. - /// - /// This may be called more than once if multiple libs are required. - /// All libs must be found for the probe to succeed. `.probe()` must - /// be run with a different configuration to look for libraries under one of several names. - /// `.libname("ssleay32")` will look for ssleay32.lib and also ssleay32.dll if - /// dynamic linking is selected. - pub fn lib_name(&mut self, lib_stem: &str) -> &mut Config { - self.required_libs.push(lib_stem.to_owned()); - self.required_dlls.push(lib_stem.to_owned()); - self - } - - /// Override the name of the library to look for if it differs from the package name. - /// - /// This may be called more than once if multiple libs are required. 
- /// All libs must be found for the probe to succeed. `.probe()` must - /// be run with a different configuration to look for libraries under one of several names. - /// `.lib_names("libcurl_imp","curl")` will look for libcurl_imp.lib and also curl.dll if - /// dynamic linking is selected. - pub fn lib_names(&mut self, lib_stem: &str, dll_stem: &str) -> &mut Config { - self.required_libs.push(lib_stem.to_owned()); - self.required_dlls.push(dll_stem.to_owned()); - self - } - - /// Define whether metadata should be emitted for cargo allowing it to - /// automatically link the binary. Defaults to `true`. - pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Config { - self.cargo_metadata = cargo_metadata; - self - } - - /// Define cargo:include= metadata should be emitted. Defaults to `false`. - pub fn emit_includes(&mut self, emit_includes: bool) -> &mut Config { - self.emit_includes = emit_includes; - self - } - - /// Should DLLs be copied to OUT_DIR? - /// Defaults to `true`. - pub fn copy_dlls(&mut self, copy_dlls: bool) -> &mut Config { - self.copy_dlls = copy_dlls; - self - } - - /// Define which path to use as vcpkg root overriding the VCPKG_ROOT environment variable - /// Default to `None`, which means use VCPKG_ROOT or try to find out automatically - pub fn vcpkg_root(&mut self, vcpkg_root: PathBuf) -> &mut Config { - self.vcpkg_root = Some(vcpkg_root); - self - } - - /// Find the library `port_name` in a Vcpkg tree. - /// - /// This will use all configuration previously set to select the - /// architecture and linkage. - /// Deprecated in favor of the find_package function - #[doc(hidden)] - pub fn probe(&mut self, port_name: &str) -> Result { - // determine the target type, bailing out if it is not some - // kind of msvc - let msvc_target = try!(msvc_target()); - - // bail out if requested to not try at all - if env::var_os("VCPKGRS_DISABLE").is_some() { - return Err(Error::DisabledByEnv("VCPKGRS_DISABLE".to_owned())); - } - - // bail out if requested to not try at all (old) - if env::var_os("NO_VCPKG").is_some() { - return Err(Error::DisabledByEnv("NO_VCPKG".to_owned())); - } - - // bail out if requested to skip this package - let abort_var_name = format!("VCPKGRS_NO_{}", envify(port_name)); - if env::var_os(&abort_var_name).is_some() { - return Err(Error::DisabledByEnv(abort_var_name)); - } - - // bail out if requested to skip this package (old) - let abort_var_name = format!("{}_NO_VCPKG", envify(port_name)); - if env::var_os(&abort_var_name).is_some() { - return Err(Error::DisabledByEnv(abort_var_name)); - } - - // if no overrides have been selected, then the Vcpkg port name - // is the the .lib name and the .dll name - if self.required_libs.is_empty() { - self.required_libs.push(port_name.to_owned()); - self.required_dlls.push(port_name.to_owned()); - } - - let vcpkg_target = try!(find_vcpkg_target(&self, &msvc_target)); - - // require explicit opt-in before using dynamically linked - // variants, otherwise cargo install of various things will - // stop working if Vcpkg is installed. 
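To make the opt-in described in the comment above concrete, the following is a rough, illustrative build.rs sketch (not taken from this diff) of how a downstream crate typically drives this probe; the port name "zlib" and the warning messages are placeholders:

fn main() {
    // On Windows, if neither RUSTFLAGS=-Ctarget-feature=+crt-static nor
    // VCPKGRS_DYNAMIC=1 is set, the probe above bails out with
    // Error::RequiredEnvMissing("VCPKGRS_DYNAMIC").
    match vcpkg::Config::new().emit_includes(true).find_package("zlib") {
        Ok(lib) => println!("cargo:warning=vcpkg resolved ports: {:?}", lib.ports),
        Err(e) => println!("cargo:warning=vcpkg probe failed: {}", e),
    }
}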
- if !vcpkg_target.is_static && !env::var_os("VCPKGRS_DYNAMIC").is_some() { - return Err(Error::RequiredEnvMissing("VCPKGRS_DYNAMIC".to_owned())); - } - - let mut lib = Library::new(vcpkg_target.is_static); - - if self.emit_includes { - lib.cargo_metadata.push(format!( - "cargo:include={}", - vcpkg_target.include_path.display() - )); - } - lib.include_paths.push(vcpkg_target.include_path.clone()); - - lib.cargo_metadata.push(format!( - "cargo:rustc-link-search=native={}", - vcpkg_target - .lib_path - .to_str() - .expect("failed to convert string type") - )); - lib.link_paths.push(vcpkg_target.lib_path.clone()); - if !vcpkg_target.is_static { - lib.cargo_metadata.push(format!( - "cargo:rustc-link-search=native={}", - vcpkg_target - .bin_path - .to_str() - .expect("failed to convert string type") - )); - // this path is dropped by recent versions of cargo hence the copies to OUT_DIR below - lib.dll_paths.push(vcpkg_target.bin_path.clone()); - } - - try!(self.emit_libs(&mut lib, &vcpkg_target)); - - if self.copy_dlls { - try!(self.do_dll_copy(&mut lib)); - } - - if self.cargo_metadata { - for line in &lib.cargo_metadata { - println!("{}", line); - } - } - Ok(lib) - } - - fn emit_libs(&mut self, lib: &mut Library, vcpkg_target: &VcpkgTarget) -> Result<(), Error> { - for required_lib in &self.required_libs { - // this could use static-nobundle= for static libraries but it is apparently - // not necessary to make the distinction for windows-msvc. - - let link_name = match vcpkg_target.strip_lib_prefix { - true => required_lib.trim_left_matches("lib"), - false => required_lib, - }; - - lib.cargo_metadata - .push(format!("cargo:rustc-link-lib={}", link_name)); - - lib.found_names.push(String::from(link_name)); - - // verify that the library exists - let mut lib_location = vcpkg_target.lib_path.clone(); - lib_location.push(required_lib.clone() + "." + &vcpkg_target.lib_suffix); - - if !lib_location.exists() { - return Err(Error::LibNotFound(lib_location.display().to_string())); - } - lib.found_libs.push(lib_location); - } - - if !vcpkg_target.is_static { - for required_dll in &self.required_dlls { - let mut dll_location = vcpkg_target.bin_path.clone(); - dll_location.push(required_dll.clone() + ".dll"); - - // verify that the DLL exists - if !dll_location.exists() { - return Err(Error::LibNotFound(dll_location.display().to_string())); - } - lib.found_dlls.push(dll_location); - } - } - - Ok(()) - } - - fn do_dll_copy(&mut self, lib: &mut Library) -> Result<(), Error> { - if let Some(target_dir) = env::var_os("OUT_DIR") { - if !lib.found_dlls.is_empty() { - for file in &lib.found_dlls { - let mut dest_path = Path::new(target_dir.as_os_str()).to_path_buf(); - dest_path.push(Path::new(file.file_name().unwrap())); - try!( - fs::copy(file, &dest_path).map_err(|_| Error::LibNotFound(format!( - "Can't copy file {} to {}", - file.to_string_lossy(), - dest_path.to_string_lossy() - ))) - ); - println!( - "vcpkg build helper copied {} to {}", - file.to_string_lossy(), - dest_path.to_string_lossy() - ); - } - lib.cargo_metadata.push(format!( - "cargo:rustc-link-search=native={}", - env::var("OUT_DIR").unwrap() - )); - // work around https://github.com/rust-lang/cargo/issues/3957 - lib.cargo_metadata.push(format!( - "cargo:rustc-link-search={}", - env::var("OUT_DIR").unwrap() - )); - } - } else { - return Err(Error::LibNotFound("Unable to get OUT_DIR".to_owned())); - } - Ok(()) - } - - /// Find the package `port_name` in a Vcpkg tree. 
- /// - /// Emits cargo metadata to link to libraries provided by the Vcpkg package/port - /// named, and any (non-system) libraries that they depend on. - /// - /// This will select the architecture and linkage based on environment - /// variables and build flags as described in the module docs, and any configuration - /// set on the builder. - pub fn find_package(&mut self, port_name: &str) -> Result { - // determine the target type, bailing out if it is not some - // kind of msvc - let msvc_target = try!(msvc_target()); - - // bail out if requested to not try at all - if env::var_os("VCPKGRS_DISABLE").is_some() { - return Err(Error::DisabledByEnv("VCPKGRS_DISABLE".to_owned())); - } - - // bail out if requested to not try at all (old) - if env::var_os("NO_VCPKG").is_some() { - return Err(Error::DisabledByEnv("NO_VCPKG".to_owned())); - } - - // bail out if requested to skip this package - let abort_var_name = format!("VCPKGRS_NO_{}", envify(port_name)); - if env::var_os(&abort_var_name).is_some() { - return Err(Error::DisabledByEnv(abort_var_name)); - } - - // bail out if requested to skip this package (old) - let abort_var_name = format!("{}_NO_VCPKG", envify(port_name)); - if env::var_os(&abort_var_name).is_some() { - return Err(Error::DisabledByEnv(abort_var_name)); - } - - let vcpkg_target = try!(find_vcpkg_target(&self, &msvc_target)); - let mut required_port_order = Vec::new(); - - // if no overrides have been selected, then the Vcpkg port name - // is the the .lib name and the .dll name - if self.required_libs.is_empty() { - let ports = try!(load_ports(&vcpkg_target)); - - if ports.get(&port_name.to_owned()).is_none() { - return Err(Error::LibNotFound(port_name.to_owned())); - } - - // the complete set of ports required - let mut required_ports: BTreeMap = BTreeMap::new(); - // working of ports that we need to include - // let mut ports_to_scan: BTreeSet = BTreeSet::new(); - // ports_to_scan.insert(port_name.to_owned()); - let mut ports_to_scan = vec![port_name.to_owned()]; //: Vec = BTreeSet::new(); - - while !ports_to_scan.is_empty() { - let port_name = ports_to_scan.pop().unwrap(); - - if required_ports.contains_key(&port_name) { - continue; - } - - if let Some(port) = ports.get(&port_name) { - for dep in &port.deps { - ports_to_scan.push(dep.clone()); - } - required_ports.insert(port_name.clone(), (*port).clone()); - remove_item(&mut required_port_order, &port_name); - required_port_order.push(port_name); - } else { - // what? 
- } - } - - // for port in ports { - // println!("port {:?}", port); - // } - // println!("== Looking for port {}", port_name); - // for port in &required_port_order { - // println!("ordered required port {:?}", port); - // } - // println!("============================="); - // for port in &required_ports { - // println!("required port {:?}", port); - // } - - // if no overrides have been selected, then the Vcpkg port name - // is the the .lib name and the .dll name - if self.required_libs.is_empty() { - for port_name in &required_port_order { - let port = required_ports.get(port_name).unwrap(); - self.required_libs.extend(port.libs.iter().map(|s| { - Path::new(&s) - .file_stem() - .unwrap() - .to_string_lossy() - .into_owned() - })); - self.required_dlls - .extend(port.dlls.iter().cloned().map(|s| { - Path::new(&s) - .file_stem() - .unwrap() - .to_string_lossy() - .into_owned() - })); - } - } - } - // require explicit opt-in before using dynamically linked - // variants, otherwise cargo install of various things will - // stop working if Vcpkg is installed. - if !vcpkg_target.is_static && !env::var_os("VCPKGRS_DYNAMIC").is_some() { - return Err(Error::RequiredEnvMissing("VCPKGRS_DYNAMIC".to_owned())); - } - - let mut lib = Library::new(vcpkg_target.is_static); - - if self.emit_includes { - lib.cargo_metadata.push(format!( - "cargo:include={}", - vcpkg_target.include_path.display() - )); - } - lib.include_paths.push(vcpkg_target.include_path.clone()); - - lib.cargo_metadata.push(format!( - "cargo:rustc-link-search=native={}", - vcpkg_target - .lib_path - .to_str() - .expect("failed to convert string type") - )); - lib.link_paths.push(vcpkg_target.lib_path.clone()); - if !vcpkg_target.is_static { - lib.cargo_metadata.push(format!( - "cargo:rustc-link-search=native={}", - vcpkg_target - .bin_path - .to_str() - .expect("failed to convert string type") - )); - // this path is dropped by recent versions of cargo hence the copies to OUT_DIR below - lib.dll_paths.push(vcpkg_target.bin_path.clone()); - } - - lib.ports = required_port_order; - - try!(self.emit_libs(&mut lib, &vcpkg_target)); - - if self.copy_dlls { - try!(self.do_dll_copy(&mut lib)); - } - - if self.cargo_metadata { - for line in &lib.cargo_metadata { - println!("{}", line); - } - } - Ok(lib) - } -} - -fn remove_item(cont: &mut Vec, item: &String) -> Option { - match cont.iter().position(|x| *x == *item) { - Some(pos) => Some(cont.remove(pos)), - None => None, - } -} - -impl Library { - fn new(is_static: bool) -> Library { - Library { - link_paths: Vec::new(), - dll_paths: Vec::new(), - include_paths: Vec::new(), - cargo_metadata: Vec::new(), - is_static: is_static, - found_dlls: Vec::new(), - found_libs: Vec::new(), - found_names: Vec::new(), - ports: Vec::new(), - } - } -} - -fn envify(name: &str) -> String { - name.chars() - .map(|c| c.to_ascii_uppercase()) - .map(|c| if c == '-' { '_' } else { c }) - .collect() -} - -fn msvc_target() -> Result { - let target = env::var("TARGET").unwrap_or(String::new()); - if target == "x86_64-apple-darwin" { - Ok(MSVCTarget::X64MacOS) - } else if target == "x86_64-unknown-linux-gnu" { - Ok(MSVCTarget::X64Linux) - } else if !target.contains("-pc-windows-msvc") { - Err(Error::NotMSVC) - } else if target.starts_with("x86_64-") { - Ok(MSVCTarget::X64Windows) - } else { - // everything else is x86 - Ok(MSVCTarget::X86Windows) - } -} - -#[cfg(test)] -mod tests { - - extern crate tempdir; - - use super::*; - use std::env; - use std::sync::Mutex; - - lazy_static! 
{ - static ref LOCK: Mutex<()> = Mutex::new(()); - } - - #[test] - fn do_nothing_for_unsupported_target() { - let _g = LOCK.lock(); - env::set_var("VCPKG_ROOT", "/"); - env::set_var("TARGET", "x86_64-pc-windows-gnu"); - assert!(match ::probe_package("foo") { - Err(Error::NotMSVC) => true, - _ => false, - }); - - env::set_var("TARGET", "x86_64-pc-windows-gnu"); - assert_eq!(env::var("TARGET"), Ok("x86_64-pc-windows-gnu".to_string())); - assert!(match ::probe_package("foo") { - Err(Error::NotMSVC) => true, - _ => false, - }); - env::remove_var("TARGET"); - env::remove_var("VCPKG_ROOT"); - } - - #[test] - fn do_nothing_for_bailout_variables_set() { - let _g = LOCK.lock(); - env::set_var("VCPKG_ROOT", "/"); - env::set_var("TARGET", "x86_64-pc-windows-msvc"); - - for &var in &[ - "VCPKGRS_DISABLE", - "VCPKGRS_NO_FOO", - "FOO_NO_VCPKG", - "NO_VCPKG", - ] { - env::set_var(var, "1"); - assert!(match ::probe_package("foo") { - Err(Error::DisabledByEnv(ref v)) if v == var => true, - _ => false, - }); - env::remove_var(var); - } - env::remove_var("TARGET"); - env::remove_var("VCPKG_ROOT"); - } - - // these tests are good but are leaning on a real vcpkg installation - - #[test] - fn default_build_refuses_dynamic() { - let _g = LOCK.lock(); - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); - env::set_var("TARGET", "x86_64-pc-windows-msvc"); - println!("Result is {:?}", ::find_package("libmysql")); - assert!(match ::find_package("libmysql") { - Err(Error::RequiredEnvMissing(ref v)) if v == "VCPKGRS_DYNAMIC" => true, - _ => false, - }); - clean_env(); - } - - #[test] - fn static_build_finds_lib() { - let _g = LOCK.lock(); - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); - env::set_var("TARGET", "x86_64-pc-windows-msvc"); - let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); - env::set_var("OUT_DIR", tmp_dir.path()); - - // CARGO_CFG_TARGET_FEATURE is set in response to - // RUSTFLAGS=-Ctarget-feature=+crt-static. It would - // be nice to test that also. 
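For reference, the static-CRT detection that the comment above refers to amounts to a check like the following sketch, which mirrors the logic in find_vcpkg_target earlier in this file (the helper name is invented):

fn crt_static_requested() -> bool {
    // rustc exposes RUSTFLAGS=-Ctarget-feature=+crt-static to build scripts
    // through the CARGO_CFG_TARGET_FEATURE environment variable.
    std::env::var("CARGO_CFG_TARGET_FEATURE")
        .unwrap_or(String::new())
        .contains("crt-static")
}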
- env::set_var("CARGO_CFG_TARGET_FEATURE", "crt-static"); - println!("Result is {:?}", ::find_package("libmysql")); - assert!(match ::find_package("libmysql") { - Ok(_) => true, - _ => false, - }); - clean_env(); - } - - #[test] - fn dynamic_build_finds_lib() { - let _g = LOCK.lock(); - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); - env::set_var("TARGET", "x86_64-pc-windows-msvc"); - env::set_var("VCPKGRS_DYNAMIC", "1"); - let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); - env::set_var("OUT_DIR", tmp_dir.path()); - - println!("Result is {:?}", ::find_package("libmysql")); - assert!(match ::find_package("libmysql") { - Ok(_) => true, - _ => false, - }); - clean_env(); - } - - #[test] - fn handle_multiline_description() { - let _g = LOCK.lock(); - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("multiline-description")); - env::set_var("TARGET", "i686-pc-windows-msvc"); - env::set_var("VCPKGRS_DYNAMIC", "1"); - let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); - env::set_var("OUT_DIR", tmp_dir.path()); - - println!("Result is {:?}", ::find_package("graphite2")); - assert!(match ::find_package("graphite2") { - Ok(_) => true, - _ => false, - }); - clean_env(); - } - - #[test] - fn link_libs_required_by_optional_features() { - let _g = LOCK.lock(); - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); - env::set_var("TARGET", "i686-pc-windows-msvc"); - env::set_var("VCPKGRS_DYNAMIC", "1"); - let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); - env::set_var("OUT_DIR", tmp_dir.path()); - - println!("Result is {:?}", ::find_package("harfbuzz")); - assert!(match ::find_package("harfbuzz") { - Ok(lib) => lib - .cargo_metadata - .iter() - .find(|&x| x == "cargo:rustc-link-lib=icuuc") - .is_some(), - _ => false, - }); - clean_env(); - } - - #[test] - fn link_lib_name_is_correct() { - let _g = LOCK.lock(); - - for target in &[ - "x86_64-apple-darwin", - "i686-pc-windows-msvc", - // "x86_64-pc-windows-msvc", - // "x86_64-unknown-linux-gnu", - ] { - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); - env::set_var("TARGET", target); - env::set_var("VCPKGRS_DYNAMIC", "1"); - let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); - env::set_var("OUT_DIR", tmp_dir.path()); - - println!("Result is {:?}", ::find_package("harfbuzz")); - assert!(match ::find_package("harfbuzz") { - Ok(lib) => lib - .cargo_metadata - .iter() - .find(|&x| x == "cargo:rustc-link-lib=harfbuzz") - .is_some(), - _ => false, - }); - clean_env(); - } - } - - #[test] - fn link_dependencies_after_port() { - let _g = LOCK.lock(); - clean_env(); - env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); - env::set_var("TARGET", "i686-pc-windows-msvc"); - env::set_var("VCPKGRS_DYNAMIC", "1"); - let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); - env::set_var("OUT_DIR", tmp_dir.path()); - - let lib = ::find_package("harfbuzz").unwrap(); - - check_before(&lib, "freetype", "zlib"); - check_before(&lib, "freetype", "bzip2"); - check_before(&lib, "freetype", "libpng"); - check_before(&lib, "harfbuzz", "freetype"); - check_before(&lib, "harfbuzz", "ragel"); - check_before(&lib, "libpng", "zlib"); - - clean_env(); - - fn check_before(lib: &Library, earlier: &str, later: &str) { - match ( - lib.ports.iter().position(|x| *x == *earlier), - lib.ports.iter().position(|x| *x == *later), - ) { - (Some(earlier_pos), Some(later_pos)) if earlier_pos < later_pos => { - // ok - } - _ => { - println!( - 
"earlier: {}, later: {}\nLibrary found: {:#?}", - earlier, later, lib - ); - panic!(); - } - } - } - } - // #[test] - // fn dynamic_build_package_specific_bailout() { - // clean_env(); - // env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); - // env::set_var("TARGET", "x86_64-pc-windows-msvc"); - // env::set_var("VCPKGRS_DYNAMIC", "1"); - // env::set_var("VCPKGRS_NO_LIBMYSQL", "1"); - - // println!("Result is {:?}", ::find_package("libmysql")); - // assert!(match ::find_package("libmysql") { - // Err(Error::DisabledByEnv(ref v)) if v == "VCPKGRS_NO_LIBMYSQL" => true, - // _ => false, - // }); - // clean_env(); - // } - - // #[test] - // fn dynamic_build_global_bailout() { - // clean_env(); - // env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); - // env::set_var("TARGET", "x86_64-pc-windows-msvc"); - // env::set_var("VCPKGRS_DYNAMIC", "1"); - // env::set_var("VCPKGRS_DISABLE", "1"); - - // println!("Result is {:?}", ::find_package("libmysql")); - // assert!(match ::find_package("libmysql") { - // Err(Error::DisabledByEnv(ref v)) if v == "VCPKGRS_DISABLE" => true, - // _ => false, - // }); - // clean_env(); - // } - - fn clean_env() { - env::remove_var("TARGET"); - env::remove_var("VCPKG_ROOT"); - env::remove_var("VCPKGRS_DYNAMIC"); - env::remove_var("RUSTFLAGS"); - env::remove_var("CARGO_CFG_TARGET_FEATURE"); - env::remove_var("VCPKGRS_DISABLE"); - env::remove_var("VCPKGRS_NO_LIBMYSQL"); - } - - // path to a to vcpkg installation to test against - fn vcpkg_test_tree_loc(name: &str) -> PathBuf { - let mut path = PathBuf::new(); - path.push(env::var("CARGO_MANIFEST_DIR").unwrap()); - path.pop(); - path.push("test-data"); - path.push(name); - path - } -} +//! A build dependency for Cargo libraries to find libraries in a +//! [Vcpkg](https://github.com/microsoft/vcpkg) tree +//! +//! From a Vcpkg package name +//! this build helper will emit cargo metadata to link it and it's dependencies +//! (excluding system libraries, which it does not determine). +//! +//! The simplest possible usage looks like this :- +//! +//! ```rust,no_run +//! vcpkg::find_package("libssh2").unwrap(); +//! ``` +//! +//! The cargo metadata that is emitted can be changed like this :- +//! +//! ```rust,no_run +//! vcpkg::Config::new() +//! .emit_includes(true) +//! .find_package("zlib").unwrap(); +//! ``` +//! +//! If the search was successful all appropriate Cargo metadata will be printed +//! to stdout. +//! +//! ## Static vs. dynamic linking +//! At this time, vcpkg itself only has a single triplet on macOS and Linux which builds +//! static link versions of libraries, which works well with Rust. +//! On Windows there are three +//! configurations that are supported for 64-bit builds and another three for 32-bit. +//! The default 64-bit configuration is `x64-windows-static-md` which is a +//! [community supported](https://github.com/microsoft/vcpkg/blob/master/docs/users/triplets.md#community-triplets) +//! configuration that is a good match for Rust - dynamically linking to the C runtime, +//! and statically linking to the packages in vcpkg. Another option is to build a fully static +//! binary using `RUSTFLAGS=-Ctarget-feature=+crt-static`. This will link to libraries built +//! with vcpkg triplet `x64-windows-static`. For dynamic linking, set `VCPKGRS_DYNAMIC=1` in the +//! environment. This will link to libraries built with vcpkg triplet `x64-windows`. If `VCPKGRS_DYNAMIC` is set, `cargo install` will +//! generate dynamically linked binaries, in which case you will have to arrange for +//! 
dlls from your Vcpkg installation to be available in your path. +//! +//! A number of environment variables are available to globally configure which +//! libraries are selected. +//! +//! * `VCPKG_ROOT` - Set the directory to look in for a vcpkg installation. If +//! it is not set, vcpkg will use the user-wide installation if one has been +//! set up with `vcpkg integrate install`, and check the crate source and target +//! to see if a vcpkg tree has been created by [cargo-vcpkg](https://crates.io/crates/cargo-vcpkg). +//! +//! * `VCPKGRS_NO_FOO` - if set, vcpkg-rs will not attempt to find the +//! library named `foo`. +//! +//! * `VCPKGRS_DISABLE` - if set, vcpkg-rs will not attempt to find any libraries. +//! +//! * `VCPKGRS_DYNAMIC` - if set, vcpkg-rs will link to DLL builds of ports. +//! +//! There is a companion crate `vcpkg_cli` that allows testing of environment +//! and flag combinations. +//! +//! ```Batchfile +//! C:\src> vcpkg_cli probe -l static mysqlclient +//! Found library mysqlclient +//! Include paths: +//! C:\src\[..]\vcpkg\installed\x64-windows-static\include +//! Library paths: +//! C:\src\[..]\vcpkg\installed\x64-windows-static\lib +//! Cargo metadata: +//! cargo:rustc-link-search=native=C:\src\[..]\vcpkg\installed\x64-windows-static\lib +//! cargo:rustc-link-lib=static=mysqlclient +//! ``` + +// The CI will test vcpkg-rs on 1.10 because at this point rust-openssl's +// openssl-sys is backward compatible that far. (Actually, the oldest release +// crate openssl version 0.10 seems to build against is now Rust 1.24.1?) +#![allow(deprecated)] +#![allow(warnings)] + +#[cfg(test)] +#[macro_use] +extern crate lazy_static; + +#[allow(unused_imports)] +use std::ascii::AsciiExt; + +use std::collections::BTreeMap; +use std::env; +use std::error; +use std::ffi::OsStr; +use std::fmt; +use std::fs::{self, File}; +use std::io::{BufRead, BufReader}; +use std::path::{Path, PathBuf}; + +/// Configuration options for finding packages, setting up the tree and emitting metadata to cargo +#[derive(Default)] +pub struct Config { + /// should the cargo metadata actually be emitted + cargo_metadata: bool, + + /// should cargo:include= metadata be emitted (defaults to false) + emit_includes: bool, + + /// .lib/.a files that must be be found for probing to be considered successful + required_libs: Vec, + + /// .dlls that must be be found for probing to be considered successful + required_dlls: Vec, + + /// should DLLs be copied to OUT_DIR? + copy_dlls: bool, + + /// override VCPKG_ROOT environment variable + vcpkg_root: Option, +} + +/// Details of a package that was found +#[derive(Debug)] +pub struct Library { + /// Paths for the linker to search for static or import libraries + pub link_paths: Vec, + + /// Paths to search at runtme to find DLLs + pub dll_paths: Vec, + + /// Paths to include files + pub include_paths: Vec, + + /// cargo: metadata lines + pub cargo_metadata: Vec, + + /// libraries found are static + pub is_static: bool, + + /// DLLs found + pub found_dlls: Vec, + + /// static libs or import libs found + pub found_libs: Vec, + + /// link name of libraries found, this is useful to emit linker commands + pub found_names: Vec, + + /// ports that are providing the libraries to link to, in port link order + pub ports: Vec, +} + +struct TargetTriplet { + triplet: String, + is_static: bool, + lib_suffix: String, + strip_lib_prefix: bool, +} + +#[derive(Debug)] // need Display? +pub enum Error { + /// Aborted because of a `VCPKGRS_NO_*` environment variable. 
+ /// + /// Contains the name of the responsible environment variable. + DisabledByEnv(String), + + /// Aborted because a required environment variable was not set. + RequiredEnvMissing(String), + + /// On Windows, only MSVC ABI is supported + NotMSVC, + + /// Can't find a vcpkg tree + VcpkgNotFound(String), + + /// Library not found in vcpkg tree + LibNotFound(String), + + /// Could not understand vcpkg installation + VcpkgInstallation(String), + + #[doc(hidden)] + __Nonexhaustive, +} + +impl error::Error for Error { + fn description(&self) -> &str { + match *self { + Error::DisabledByEnv(_) => "vcpkg-rs requested to be aborted", + Error::RequiredEnvMissing(_) => "a required env setting is missing", + Error::NotMSVC => "vcpkg-rs only can only find libraries for MSVC ABI builds", + Error::VcpkgNotFound(_) => "could not find Vcpkg tree", + Error::LibNotFound(_) => "could not find library in Vcpkg tree", + Error::VcpkgInstallation(_) => "could not look up details of packages in vcpkg tree", + Error::__Nonexhaustive => panic!(), + } + } + + fn cause(&self) -> Option<&error::Error> { + match *self { + // Error::Command { ref cause, .. } => Some(cause), + _ => None, + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::DisabledByEnv(ref name) => write!(f, "Aborted because {} is set", name), + Error::RequiredEnvMissing(ref name) => write!(f, "Aborted because {} is not set", name), + Error::NotMSVC => write!( + f, + "the vcpkg-rs Vcpkg build helper can only find libraries built for the MSVC ABI." + ), + Error::VcpkgNotFound(ref detail) => write!(f, "Could not find Vcpkg tree: {}", detail), + Error::LibNotFound(ref detail) => { + write!(f, "Could not find library in Vcpkg tree {}", detail) + } + Error::VcpkgInstallation(ref detail) => write!( + f, + "Could not look up details of packages in vcpkg tree {}", + detail + ), + Error::__Nonexhaustive => panic!(), + } + } +} + +/// Deprecated in favor of the find_package function +#[doc(hidden)] +pub fn probe_package(name: &str) -> Result { + Config::new().probe(name) +} + +/// Find the package `package` in a Vcpkg tree. +/// +/// Emits cargo metadata to link to libraries provided by the Vcpkg package/port +/// named, and any (non-system) libraries that they depend on. +/// +/// This will select the architecture and linkage based on environment +/// variables and build flags as described in the module docs. 
+pub fn find_package(package: &str) -> Result { + Config::new().find_package(package) +} + +/// Find the vcpkg root +#[doc(hidden)] +pub fn find_vcpkg_root(cfg: &Config) -> Result { + // prefer the setting from the use if there is one + if let &Some(ref path) = &cfg.vcpkg_root { + return Ok(path.clone()); + } + + // otherwise, use the setting from the environment + if let Some(path) = env::var_os("VCPKG_ROOT") { + return Ok(PathBuf::from(path)); + } + + // see if there is a per-user vcpkg tree that has been integrated into msbuild + // using `vcpkg integrate install` + if let Ok(ref local_app_data) = env::var("LOCALAPPDATA") { + let vcpkg_user_targets_path = Path::new(local_app_data.as_str()) + .join("vcpkg") + .join("vcpkg.user.targets"); + + if let Ok(file) = File::open(vcpkg_user_targets_path.clone()) { + let file = BufReader::new(&file); + + for line in file.lines() { + let line = try!(line.map_err(|_| Error::VcpkgNotFound(format!( + "Parsing of {} failed.", + vcpkg_user_targets_path.to_string_lossy().to_owned() + )))); + let mut split = line.split("Project=\""); + split.next(); // eat anything before Project=" + if let Some(found) = split.next() { + // " is illegal in a Windows pathname + if let Some(found) = found.split_terminator('"').next() { + let mut vcpkg_root = PathBuf::from(found); + if !(vcpkg_root.pop() + && vcpkg_root.pop() + && vcpkg_root.pop() + && vcpkg_root.pop()) + { + return Err(Error::VcpkgNotFound(format!( + "Could not find vcpkg root above {}", + found + ))); + } + return Ok(vcpkg_root); + } + } + } + + // return Err(Error::VcpkgNotFound(format!( + // "Project location not found parsing {}.", + // vcpkg_user_targets_path.to_string_lossy().to_owned() + // ))); + } + } + + // walk up the directory structure and see if it is there + if let Some(path) = env::var_os("OUT_DIR") { + // path.ancestors() is supported from Rust 1.28 + let mut path = PathBuf::from(path); + while path.pop() { + let mut try_root = path.clone(); + try_root.push("vcpkg"); + try_root.push(".vcpkg-root"); + if try_root.exists() { + try_root.pop(); + + // this could walk up beyond the target directory and find a vcpkg installation + // that would not have been found by previous versions of vcpkg-rs, so this + // checks that the vcpkg tree was created by cargo-vcpkg and ignores it if not. + let mut cv_cfg = try_root.clone(); + cv_cfg.push("downloads"); + cv_cfg.push("cargo-vcpkg.toml"); + if cv_cfg.exists() { + return Ok(try_root); + } + } + } + } + + Err(Error::VcpkgNotFound( + "No vcpkg installation found. 
Set the VCPKG_ROOT environment \ + variable or run 'vcpkg integrate install'" + .to_string(), + )) +} + +fn validate_vcpkg_root(path: &PathBuf) -> Result<(), Error> { + let mut vcpkg_root_path = path.clone(); + vcpkg_root_path.push(".vcpkg-root"); + + if vcpkg_root_path.exists() { + Ok(()) + } else { + Err(Error::VcpkgNotFound(format!( + "Could not find Vcpkg root at {}", + vcpkg_root_path.to_string_lossy() + ))) + } +} + +fn find_vcpkg_target(cfg: &Config, target_triplet: &TargetTriplet) -> Result { + let vcpkg_root = try!(find_vcpkg_root(&cfg)); + try!(validate_vcpkg_root(&vcpkg_root)); + + let mut base = vcpkg_root; + base.push("installed"); + let status_path = base.join("vcpkg"); + + base.push(&target_triplet.triplet); + + let lib_path = base.join("lib"); + let bin_path = base.join("bin"); + let include_path = base.join("include"); + + Ok(VcpkgTarget { + vcpkg_triplet: target_triplet.triplet.to_owned(), + lib_path: lib_path, + bin_path: bin_path, + include_path: include_path, + is_static: target_triplet.is_static, + status_path: status_path, + lib_suffix: target_triplet.lib_suffix.to_owned(), + strip_lib_prefix: target_triplet.strip_lib_prefix, + }) +} + +#[derive(Clone, Debug)] +struct Port { + // dlls if any + dlls: Vec, + + // libs (static or import) + libs: Vec, + + // ports that this port depends on + deps: Vec, +} + +fn load_port_manifest( + path: &PathBuf, + port: &str, + version: &str, + vcpkg_target: &VcpkgTarget, +) -> Result<(Vec, Vec), Error> { + let manifest_file = path.join("info").join(format!( + "{}_{}_{}.list", + port, version, vcpkg_target.vcpkg_triplet + )); + + let mut dlls = Vec::new(); + let mut libs = Vec::new(); + + let f = try!( + File::open(&manifest_file).map_err(|_| Error::VcpkgInstallation(format!( + "Could not open port manifest file {}", + manifest_file.display() + ))) + ); + + let file = BufReader::new(&f); + + let dll_prefix = Path::new(&vcpkg_target.vcpkg_triplet).join("bin"); + let lib_prefix = Path::new(&vcpkg_target.vcpkg_triplet).join("lib"); + + for line in file.lines() { + let line = line.unwrap(); + + let file_path = Path::new(&line); + + if let Ok(dll) = file_path.strip_prefix(&dll_prefix) { + if dll.extension() == Some(OsStr::new("dll")) + && dll.components().collect::>().len() == 1 + { + // match "mylib.dll" but not "debug/mylib.dll" or "manual_link/mylib.dll" + + dll.to_str().map(|s| dlls.push(s.to_owned())); + } + } else if let Ok(lib) = file_path.strip_prefix(&lib_prefix) { + if lib.extension() == Some(OsStr::new(&vcpkg_target.lib_suffix)) + && lib.components().collect::>().len() == 1 + { + if let Some(lib) = vcpkg_target.link_name_for_lib(lib) { + libs.push(lib); + } + } + } + } + + Ok((dlls, libs)) +} + +// load ports from the status file or one of the incremental updates +fn load_port_file( + filename: &PathBuf, + port_info: &mut Vec>, +) -> Result<(), Error> { + let f = try!( + File::open(&filename).map_err(|e| Error::VcpkgInstallation(format!( + "Could not open status file at {}: {}", + filename.display(), + e + ))) + ); + let file = BufReader::new(&f); + let mut current: BTreeMap = BTreeMap::new(); + for line in file.lines() { + let line = line.unwrap(); + let parts = line.splitn(2, ": ").clone().collect::>(); + if parts.len() == 2 { + // a key: value line + current.insert(parts[0].trim().into(), parts[1].trim().into()); + } else if line.len() == 0 { + // end of section + port_info.push(current.clone()); + current.clear(); + } else { + // ignore all extension lines of the form + // + // Description: a package with a + // very long 
description + // + // the description key is not used so this is harmless but + // this will eat extension lines for any multiline key which + // could become an issue in future + } + } + + if !current.is_empty() { + port_info.push(current); + } + + Ok(()) +} + +fn load_ports(target: &VcpkgTarget) -> Result, Error> { + let mut ports: BTreeMap = BTreeMap::new(); + + let mut port_info: Vec> = Vec::new(); + + // load the main status file. It is not an error if this file does not + // exist. If the only command that has been run in a Vcpkg installation + // is a single `vcpkg install package` then there will likely be no + // status file, only incremental updates. This is the typical case when + // running in a CI environment. + let status_filename = target.status_path.join("status"); + load_port_file(&status_filename, &mut port_info).ok(); + + // load updates to the status file that have yet to be normalized + let status_update_dir = target.status_path.join("updates"); + + let paths = try!( + fs::read_dir(status_update_dir).map_err(|e| Error::VcpkgInstallation(format!( + "could not read status file updates dir: {}", + e + ))) + ); + + // get all of the paths of the update files into a Vec + let mut paths = try!(paths + .map(|rde| rde.map(|de| de.path())) // Result -> Result + .collect::, _>>() // collect into Result, io::Error> + .map_err(|e| { + Error::VcpkgInstallation(format!( + "could not read status file update filenames: {}", + e + )) + })); + + // Sort the paths and read them. This could be done directly from the iterator if + // read_dir() guarantees that the files will be read in alpha order but that appears + // to be unspecified as the underlying operating system calls used are unspecified + // https://doc.rust-lang.org/nightly/std/fs/fn.read_dir.html#platform-specific-behavior + paths.sort(); + for path in paths { + // println!("Name: {}", path.display()); + try!(load_port_file(&path, &mut port_info)); + } + //println!("{:#?}", port_info); + + let mut seen_names = BTreeMap::new(); + for current in &port_info { + // store them by name and arch, clobbering older details + match ( + current.get("Package"), + current.get("Architecture"), + current.get("Feature"), + ) { + (Some(pkg), Some(arch), feature) => { + seen_names.insert((pkg, arch, feature), current); + } + _ => {} + } + } + + for (&(name, arch, feature), current) in &seen_names { + if **arch == target.vcpkg_triplet { + let mut deps = if let Some(deps) = current.get("Depends") { + deps.split(", ").map(|x| x.to_owned()).collect() + } else { + Vec::new() + }; + + if current + .get("Status") + .unwrap_or(&String::new()) + .ends_with(" installed") + { + match (current.get("Version"), feature) { + (Some(version), _) => { + // this failing here and bailing out causes everything to fail + let lib_info = try!(load_port_manifest( + &target.status_path, + &name, + version, + &target + )); + let port = Port { + dlls: lib_info.0, + libs: lib_info.1, + deps: deps, + }; + + ports.insert(name.to_string(), port); + } + (_, Some(_feature)) => match ports.get_mut(name) { + Some(ref mut port) => { + port.deps.append(&mut deps); + } + _ => { + println!("found a feature that had no corresponding port :-"); + println!("current {:+?}", current); + continue; + } + }, + (_, _) => { + println!("didn't know how to deal with status file entry :-"); + println!("{:+?}", current); + continue; + } + } + } + } + } + + Ok(ports) +} + +/// paths and triple for the chosen target +struct VcpkgTarget { + vcpkg_triplet: String, + lib_path: PathBuf, + bin_path: 
PathBuf, + include_path: PathBuf, + + // directory containing the status file + status_path: PathBuf, + + is_static: bool, + lib_suffix: String, + + /// strip 'lib' from library names in linker args? + strip_lib_prefix: bool, +} + +impl VcpkgTarget { + fn link_name_for_lib(&self, filename: &std::path::Path) -> Option { + if self.strip_lib_prefix { + filename.to_str().map(|s| s.to_owned()) + // filename + // .to_str() + // .map(|s| s.trim_left_matches("lib").to_owned()) + } else { + filename.to_str().map(|s| s.to_owned()) + } + } +} + +impl Config { + pub fn new() -> Config { + Config { + cargo_metadata: true, + copy_dlls: true, + ..Default::default() + } + } + + /// Find the package `port_name` in a Vcpkg tree. + /// + /// Emits cargo metadata to link to libraries provided by the Vcpkg package/port + /// named, and any (non-system) libraries that they depend on. + /// + /// This will select the architecture and linkage based on environment + /// variables and build flags as described in the module docs, and any configuration + /// set on the builder. + pub fn find_package(&mut self, port_name: &str) -> Result { + // determine the target type, bailing out if it is not some + // kind of msvc + let msvc_target = try!(msvc_target()); + + // bail out if requested to not try at all + if env::var_os("VCPKGRS_DISABLE").is_some() { + return Err(Error::DisabledByEnv("VCPKGRS_DISABLE".to_owned())); + } + + // bail out if requested to not try at all (old) + if env::var_os("NO_VCPKG").is_some() { + return Err(Error::DisabledByEnv("NO_VCPKG".to_owned())); + } + + // bail out if requested to skip this package + let abort_var_name = format!("VCPKGRS_NO_{}", envify(port_name)); + if env::var_os(&abort_var_name).is_some() { + return Err(Error::DisabledByEnv(abort_var_name)); + } + + // bail out if requested to skip this package (old) + let abort_var_name = format!("{}_NO_VCPKG", envify(port_name)); + if env::var_os(&abort_var_name).is_some() { + return Err(Error::DisabledByEnv(abort_var_name)); + } + + let vcpkg_target = try!(find_vcpkg_target(&self, &msvc_target)); + let mut required_port_order = Vec::new(); + + // if no overrides have been selected, then the Vcpkg port name + // is the the .lib name and the .dll name + if self.required_libs.is_empty() { + let ports = try!(load_ports(&vcpkg_target)); + + if ports.get(&port_name.to_owned()).is_none() { + return Err(Error::LibNotFound(format!( + "package {} is not installed for vcpkg triplet {}", + port_name.to_owned(), + vcpkg_target.vcpkg_triplet + ))); + } + + // the complete set of ports required + let mut required_ports: BTreeMap = BTreeMap::new(); + // working of ports that we need to include + // let mut ports_to_scan: BTreeSet = BTreeSet::new(); + // ports_to_scan.insert(port_name.to_owned()); + let mut ports_to_scan = vec![port_name.to_owned()]; //: Vec = BTreeSet::new(); + + while !ports_to_scan.is_empty() { + let port_name = ports_to_scan.pop().unwrap(); + + if required_ports.contains_key(&port_name) { + continue; + } + + if let Some(port) = ports.get(&port_name) { + for dep in &port.deps { + ports_to_scan.push(dep.clone()); + } + required_ports.insert(port_name.clone(), (*port).clone()); + remove_item(&mut required_port_order, &port_name); + required_port_order.push(port_name); + } else { + // what? 
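// The traversal above can be restated as a self-contained helper for clarity; this is
// an illustration only, and `collect_required_ports` / `deps_of` are hypothetical names
// (the real code works on the `ports` map loaded from the status file). Ports come off
// a stack seeded with the requested port, each port is handled at most once, and its
// dependencies are pushed for later handling.
fn collect_required_ports(
    root: &str,
    deps_of: &std::collections::BTreeMap<String, Vec<String>>,
) -> Vec<String> {
    let mut order: Vec<String> = Vec::new();
    let mut to_scan = vec![root.to_string()];
    while let Some(name) = to_scan.pop() {
        if order.contains(&name) {
            // already recorded, mirroring the required_ports check above
            continue;
        }
        if let Some(deps) = deps_of.get(&name) {
            for dep in deps {
                to_scan.push(dep.clone());
            }
            order.push(name);
        }
        // a name listed as a dependency but missing from the map is silently skipped,
        // mirroring the empty else branch above
    }
    order
}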
+ } + } + + // for port in ports { + // println!("port {:?}", port); + // } + // println!("== Looking for port {}", port_name); + // for port in &required_port_order { + // println!("ordered required port {:?}", port); + // } + // println!("============================="); + // for port in &required_ports { + // println!("required port {:?}", port); + // } + + // if no overrides have been selected, then the Vcpkg port name + // is the the .lib name and the .dll name + if self.required_libs.is_empty() { + for port_name in &required_port_order { + let port = required_ports.get(port_name).unwrap(); + self.required_libs.extend(port.libs.iter().map(|s| { + Path::new(&s) + .file_stem() + .unwrap() + .to_string_lossy() + .into_owned() + })); + self.required_dlls + .extend(port.dlls.iter().cloned().map(|s| { + Path::new(&s) + .file_stem() + .unwrap() + .to_string_lossy() + .into_owned() + })); + } + } + } + // require explicit opt-in before using dynamically linked + // variants, otherwise cargo install of various things will + // stop working if Vcpkg is installed. + if !vcpkg_target.is_static && !env::var_os("VCPKGRS_DYNAMIC").is_some() { + return Err(Error::RequiredEnvMissing("VCPKGRS_DYNAMIC".to_owned())); + } + + let mut lib = Library::new(vcpkg_target.is_static); + + if self.emit_includes { + lib.cargo_metadata.push(format!( + "cargo:include={}", + vcpkg_target.include_path.display() + )); + } + lib.include_paths.push(vcpkg_target.include_path.clone()); + + lib.cargo_metadata.push(format!( + "cargo:rustc-link-search=native={}", + vcpkg_target + .lib_path + .to_str() + .expect("failed to convert string type") + )); + lib.link_paths.push(vcpkg_target.lib_path.clone()); + if !vcpkg_target.is_static { + lib.cargo_metadata.push(format!( + "cargo:rustc-link-search=native={}", + vcpkg_target + .bin_path + .to_str() + .expect("failed to convert string type") + )); + // this path is dropped by recent versions of cargo hence the copies to OUT_DIR below + lib.dll_paths.push(vcpkg_target.bin_path.clone()); + } + + lib.ports = required_port_order; + + try!(self.emit_libs(&mut lib, &vcpkg_target)); + + if self.copy_dlls { + try!(self.do_dll_copy(&mut lib)); + } + + if self.cargo_metadata { + for line in &lib.cargo_metadata { + println!("{}", line); + } + } + Ok(lib) + } + + /// Define whether metadata should be emitted for cargo allowing it to + /// automatically link the binary. Defaults to `true`. + pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Config { + self.cargo_metadata = cargo_metadata; + self + } + + /// Define cargo:include= metadata should be emitted. Defaults to `false`. + pub fn emit_includes(&mut self, emit_includes: bool) -> &mut Config { + self.emit_includes = emit_includes; + self + } + + /// Should DLLs be copied to OUT_DIR? + /// Defaults to `true`. + pub fn copy_dlls(&mut self, copy_dlls: bool) -> &mut Config { + self.copy_dlls = copy_dlls; + self + } + + /// Define which path to use as vcpkg root overriding the VCPKG_ROOT environment variable + /// Default to `None`, which means use VCPKG_ROOT or try to find out automatically + pub fn vcpkg_root(&mut self, vcpkg_root: PathBuf) -> &mut Config { + self.vcpkg_root = Some(vcpkg_root); + self + } + + /// Find the library `port_name` in a Vcpkg tree. + /// + /// This will use all configuration previously set to select the + /// architecture and linkage. 
+ /// Deprecated in favor of the find_package function + #[doc(hidden)] + pub fn probe(&mut self, port_name: &str) -> Result { + // determine the target type, bailing out if it is not some + // kind of msvc + let msvc_target = try!(msvc_target()); + + // bail out if requested to not try at all + if env::var_os("VCPKGRS_DISABLE").is_some() { + return Err(Error::DisabledByEnv("VCPKGRS_DISABLE".to_owned())); + } + + // bail out if requested to not try at all (old) + if env::var_os("NO_VCPKG").is_some() { + return Err(Error::DisabledByEnv("NO_VCPKG".to_owned())); + } + + // bail out if requested to skip this package + let abort_var_name = format!("VCPKGRS_NO_{}", envify(port_name)); + if env::var_os(&abort_var_name).is_some() { + return Err(Error::DisabledByEnv(abort_var_name)); + } + + // bail out if requested to skip this package (old) + let abort_var_name = format!("{}_NO_VCPKG", envify(port_name)); + if env::var_os(&abort_var_name).is_some() { + return Err(Error::DisabledByEnv(abort_var_name)); + } + + // if no overrides have been selected, then the Vcpkg port name + // is the the .lib name and the .dll name + if self.required_libs.is_empty() { + self.required_libs.push(port_name.to_owned()); + self.required_dlls.push(port_name.to_owned()); + } + + let vcpkg_target = try!(find_vcpkg_target(&self, &msvc_target)); + + // require explicit opt-in before using dynamically linked + // variants, otherwise cargo install of various things will + // stop working if Vcpkg is installed. + if !vcpkg_target.is_static && !env::var_os("VCPKGRS_DYNAMIC").is_some() { + return Err(Error::RequiredEnvMissing("VCPKGRS_DYNAMIC".to_owned())); + } + + let mut lib = Library::new(vcpkg_target.is_static); + + if self.emit_includes { + lib.cargo_metadata.push(format!( + "cargo:include={}", + vcpkg_target.include_path.display() + )); + } + lib.include_paths.push(vcpkg_target.include_path.clone()); + + lib.cargo_metadata.push(format!( + "cargo:rustc-link-search=native={}", + vcpkg_target + .lib_path + .to_str() + .expect("failed to convert string type") + )); + lib.link_paths.push(vcpkg_target.lib_path.clone()); + if !vcpkg_target.is_static { + lib.cargo_metadata.push(format!( + "cargo:rustc-link-search=native={}", + vcpkg_target + .bin_path + .to_str() + .expect("failed to convert string type") + )); + // this path is dropped by recent versions of cargo hence the copies to OUT_DIR below + lib.dll_paths.push(vcpkg_target.bin_path.clone()); + } + + try!(self.emit_libs(&mut lib, &vcpkg_target)); + + if self.copy_dlls { + try!(self.do_dll_copy(&mut lib)); + } + + if self.cargo_metadata { + for line in &lib.cargo_metadata { + println!("{}", line); + } + } + Ok(lib) + } + + fn emit_libs(&mut self, lib: &mut Library, vcpkg_target: &VcpkgTarget) -> Result<(), Error> { + for required_lib in &self.required_libs { + // this could use static-nobundle= for static libraries but it is apparently + // not necessary to make the distinction for windows-msvc. + + let link_name = match vcpkg_target.strip_lib_prefix { + true => required_lib.trim_left_matches("lib"), + false => required_lib, + }; + + lib.cargo_metadata + .push(format!("cargo:rustc-link-lib={}", link_name)); + + lib.found_names.push(String::from(link_name)); + + // verify that the library exists + let mut lib_location = vcpkg_target.lib_path.clone(); + lib_location.push(required_lib.clone() + "." 
+ &vcpkg_target.lib_suffix); + + if !lib_location.exists() { + return Err(Error::LibNotFound(lib_location.display().to_string())); + } + lib.found_libs.push(lib_location); + } + + if !vcpkg_target.is_static { + for required_dll in &self.required_dlls { + let mut dll_location = vcpkg_target.bin_path.clone(); + dll_location.push(required_dll.clone() + ".dll"); + + // verify that the DLL exists + if !dll_location.exists() { + return Err(Error::LibNotFound(dll_location.display().to_string())); + } + lib.found_dlls.push(dll_location); + } + } + + Ok(()) + } + + fn do_dll_copy(&mut self, lib: &mut Library) -> Result<(), Error> { + if let Some(target_dir) = env::var_os("OUT_DIR") { + if !lib.found_dlls.is_empty() { + for file in &lib.found_dlls { + let mut dest_path = Path::new(target_dir.as_os_str()).to_path_buf(); + dest_path.push(Path::new(file.file_name().unwrap())); + try!( + fs::copy(file, &dest_path).map_err(|_| Error::LibNotFound(format!( + "Can't copy file {} to {}", + file.to_string_lossy(), + dest_path.to_string_lossy() + ))) + ); + println!( + "vcpkg build helper copied {} to {}", + file.to_string_lossy(), + dest_path.to_string_lossy() + ); + } + lib.cargo_metadata.push(format!( + "cargo:rustc-link-search=native={}", + env::var("OUT_DIR").unwrap() + )); + // work around https://github.com/rust-lang/cargo/issues/3957 + lib.cargo_metadata.push(format!( + "cargo:rustc-link-search={}", + env::var("OUT_DIR").unwrap() + )); + } + } else { + return Err(Error::LibNotFound("Unable to get OUT_DIR".to_owned())); + } + Ok(()) + } + + /// Override the name of the library to look for if it differs from the package name. + /// + /// This may be called more than once if multiple libs are required. + /// All libs must be found for the probe to succeed. `.probe()` must + /// be run with a different configuration to look for libraries under one of several names. + /// `.libname("ssleay32")` will look for ssleay32.lib and also ssleay32.dll if + /// dynamic linking is selected. + pub fn lib_name(&mut self, lib_stem: &str) -> &mut Config { + self.required_libs.push(lib_stem.to_owned()); + self.required_dlls.push(lib_stem.to_owned()); + self + } + + /// Override the name of the library to look for if it differs from the package name. + /// + /// This may be called more than once if multiple libs are required. + /// All libs must be found for the probe to succeed. `.probe()` must + /// be run with a different configuration to look for libraries under one of several names. + /// `.lib_names("libcurl_imp","curl")` will look for libcurl_imp.lib and also curl.dll if + /// dynamic linking is selected. 
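// Illustrative build.rs sketch, mirroring the libcurl_imp/curl example in the doc
// comment above; the port name "curl" and the chained calls are assumptions about a
// consumer crate, not part of this diff. find_package() honours the overrides because
// it only consults the installed ports when no names have been set explicitly.
fn main() {
    let result = vcpkg::Config::new()
        .emit_includes(true)
        .lib_names("libcurl_imp", "curl")
        .find_package("curl");
    if let Err(err) = result {
        // fall back to another way of locating curl, or fail the build here
        eprintln!("vcpkg could not provide curl: {}", err);
    }
}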
+ pub fn lib_names(&mut self, lib_stem: &str, dll_stem: &str) -> &mut Config { + self.required_libs.push(lib_stem.to_owned()); + self.required_dlls.push(dll_stem.to_owned()); + self + } +} + +fn remove_item(cont: &mut Vec, item: &String) -> Option { + match cont.iter().position(|x| *x == *item) { + Some(pos) => Some(cont.remove(pos)), + None => None, + } +} + +impl Library { + fn new(is_static: bool) -> Library { + Library { + link_paths: Vec::new(), + dll_paths: Vec::new(), + include_paths: Vec::new(), + cargo_metadata: Vec::new(), + is_static: is_static, + found_dlls: Vec::new(), + found_libs: Vec::new(), + found_names: Vec::new(), + ports: Vec::new(), + } + } +} + +fn envify(name: &str) -> String { + name.chars() + .map(|c| c.to_ascii_uppercase()) + .map(|c| if c == '-' { '_' } else { c }) + .collect() +} + +fn msvc_target() -> Result { + let is_definitely_dynamic = env::var("VCPKGRS_DYNAMIC").is_ok(); + let target = env::var("TARGET").unwrap_or(String::new()); + let is_static = env::var("CARGO_CFG_TARGET_FEATURE") + .unwrap_or(String::new()) // rustc 1.10 + .contains("crt-static"); + if target == "x86_64-apple-darwin" { + Ok(TargetTriplet { + triplet: "x64-osx".into(), + is_static: true, + lib_suffix: "a".into(), + strip_lib_prefix: true, + }) + } else if target == "x86_64-unknown-linux-gnu" { + Ok(TargetTriplet { + triplet: "x64-linux".into(), + is_static: true, + lib_suffix: "a".into(), + strip_lib_prefix: true, + }) + } else if !target.contains("-pc-windows-msvc") { + Err(Error::NotMSVC) + } else if target.starts_with("x86_64-") { + if is_static { + Ok(TargetTriplet { + triplet: "x64-windows-static".into(), + is_static: true, + lib_suffix: "lib".into(), + strip_lib_prefix: false, + }) + } else if is_definitely_dynamic { + Ok(TargetTriplet { + triplet: "x64-windows".into(), + is_static: false, + lib_suffix: "lib".into(), + strip_lib_prefix: false, + }) + } else { + Ok(TargetTriplet { + triplet: "x64-windows-static-md".into(), + is_static: true, + lib_suffix: "lib".into(), + strip_lib_prefix: false, + }) + } + } else { + // everything else is x86 + if is_static { + Ok(TargetTriplet { + triplet: "x86-windows-static".into(), + is_static: true, + lib_suffix: "lib".into(), + strip_lib_prefix: false, + }) + } else if is_definitely_dynamic { + Ok(TargetTriplet { + triplet: "x86-windows".into(), + is_static: false, + lib_suffix: "lib".into(), + strip_lib_prefix: false, + }) + } else { + Ok(TargetTriplet { + triplet: "x86-windows-static-md".into(), + is_static: true, + lib_suffix: "lib".into(), + strip_lib_prefix: false, + }) + } + } +} + +#[cfg(test)] +mod tests { + + extern crate tempdir; + + use super::*; + use std::env; + use std::sync::Mutex; + + lazy_static! 
{ + static ref LOCK: Mutex<()> = Mutex::new(()); + } + + #[test] + fn do_nothing_for_unsupported_target() { + let _g = LOCK.lock(); + env::set_var("VCPKG_ROOT", "/"); + env::set_var("TARGET", "x86_64-pc-windows-gnu"); + assert!(match ::probe_package("foo") { + Err(Error::NotMSVC) => true, + _ => false, + }); + + env::set_var("TARGET", "x86_64-pc-windows-gnu"); + assert_eq!(env::var("TARGET"), Ok("x86_64-pc-windows-gnu".to_string())); + assert!(match ::probe_package("foo") { + Err(Error::NotMSVC) => true, + _ => false, + }); + env::remove_var("TARGET"); + env::remove_var("VCPKG_ROOT"); + } + + #[test] + fn do_nothing_for_bailout_variables_set() { + let _g = LOCK.lock(); + env::set_var("VCPKG_ROOT", "/"); + env::set_var("TARGET", "x86_64-pc-windows-msvc"); + + for &var in &[ + "VCPKGRS_DISABLE", + "VCPKGRS_NO_FOO", + "FOO_NO_VCPKG", + "NO_VCPKG", + ] { + env::set_var(var, "1"); + assert!(match ::probe_package("foo") { + Err(Error::DisabledByEnv(ref v)) if v == var => true, + _ => false, + }); + env::remove_var(var); + } + env::remove_var("TARGET"); + env::remove_var("VCPKG_ROOT"); + } + + // these tests are good but are leaning on a real vcpkg installation + + // #[test] + // fn default_build_refuses_dynamic() { + // let _g = LOCK.lock(); + // clean_env(); + // env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); + // env::set_var("TARGET", "x86_64-pc-windows-msvc"); + // println!("Result is {:?}", ::find_package("libmysql")); + // assert!(match ::find_package("libmysql") { + // Err(Error::RequiredEnvMissing(ref v)) if v == "VCPKGRS_DYNAMIC" => true, + // _ => false, + // }); + // clean_env(); + // } + + #[test] + fn static_build_finds_lib() { + let _g = LOCK.lock(); + clean_env(); + env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); + env::set_var("TARGET", "x86_64-pc-windows-msvc"); + let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); + env::set_var("OUT_DIR", tmp_dir.path()); + + // CARGO_CFG_TARGET_FEATURE is set in response to + // RUSTFLAGS=-Ctarget-feature=+crt-static. It would + // be nice to test that also. 
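// The mapping performed by msvc_target() earlier in this file can be summarised as a
// small pure function; this restatement is illustrative only (triplet_for is not part
// of the crate). crt_static corresponds to CARGO_CFG_TARGET_FEATURE containing
// "crt-static", dynamic_requested to VCPKGRS_DYNAMIC being set; the real function also
// records the library suffix and whether the build is static.
fn triplet_for(target: &str, crt_static: bool, dynamic_requested: bool) -> Result<&'static str, Error> {
    if target == "x86_64-apple-darwin" {
        return Ok("x64-osx");
    }
    if target == "x86_64-unknown-linux-gnu" {
        return Ok("x64-linux");
    }
    if !target.contains("-pc-windows-msvc") {
        return Err(Error::NotMSVC);
    }
    let x64 = target.starts_with("x86_64-");
    Ok(match (x64, crt_static, dynamic_requested) {
        (true, true, _) => "x64-windows-static",
        (true, false, true) => "x64-windows",
        (true, false, false) => "x64-windows-static-md",
        (false, true, _) => "x86-windows-static",
        (false, false, true) => "x86-windows",
        (false, false, false) => "x86-windows-static-md",
    })
}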
+ env::set_var("CARGO_CFG_TARGET_FEATURE", "crt-static"); + println!("Result is {:?}", ::find_package("libmysql")); + assert!(match ::find_package("libmysql") { + Ok(_) => true, + _ => false, + }); + clean_env(); + } + + #[test] + fn dynamic_build_finds_lib() { + let _g = LOCK.lock(); + clean_env(); + env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); + env::set_var("TARGET", "x86_64-pc-windows-msvc"); + env::set_var("VCPKGRS_DYNAMIC", "1"); + let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); + env::set_var("OUT_DIR", tmp_dir.path()); + + println!("Result is {:?}", ::find_package("libmysql")); + assert!(match ::find_package("libmysql") { + Ok(_) => true, + _ => false, + }); + clean_env(); + } + + #[test] + fn handle_multiline_description() { + let _g = LOCK.lock(); + clean_env(); + env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("multiline-description")); + env::set_var("TARGET", "i686-pc-windows-msvc"); + env::set_var("VCPKGRS_DYNAMIC", "1"); + let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); + env::set_var("OUT_DIR", tmp_dir.path()); + + println!("Result is {:?}", ::find_package("graphite2")); + assert!(match ::find_package("graphite2") { + Ok(_) => true, + _ => false, + }); + clean_env(); + } + + #[test] + fn link_libs_required_by_optional_features() { + let _g = LOCK.lock(); + clean_env(); + env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); + env::set_var("TARGET", "i686-pc-windows-msvc"); + env::set_var("VCPKGRS_DYNAMIC", "1"); + let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); + env::set_var("OUT_DIR", tmp_dir.path()); + + println!("Result is {:?}", ::find_package("harfbuzz")); + assert!(match ::find_package("harfbuzz") { + Ok(lib) => lib + .cargo_metadata + .iter() + .find(|&x| x == "cargo:rustc-link-lib=icuuc") + .is_some(), + _ => false, + }); + clean_env(); + } + + #[test] + fn link_lib_name_is_correct() { + let _g = LOCK.lock(); + + for target in &[ + "x86_64-apple-darwin", + "i686-pc-windows-msvc", + // "x86_64-pc-windows-msvc", + // "x86_64-unknown-linux-gnu", + ] { + clean_env(); + env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); + env::set_var("TARGET", target); + env::set_var("VCPKGRS_DYNAMIC", "1"); + let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); + env::set_var("OUT_DIR", tmp_dir.path()); + + println!("Result is {:?}", ::find_package("harfbuzz")); + assert!(match ::find_package("harfbuzz") { + Ok(lib) => lib + .cargo_metadata + .iter() + .find(|&x| x == "cargo:rustc-link-lib=harfbuzz") + .is_some(), + _ => false, + }); + clean_env(); + } + } + + #[test] + fn link_dependencies_after_port() { + let _g = LOCK.lock(); + clean_env(); + env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("normalized")); + env::set_var("TARGET", "i686-pc-windows-msvc"); + env::set_var("VCPKGRS_DYNAMIC", "1"); + let tmp_dir = tempdir::TempDir::new("vcpkg_tests").unwrap(); + env::set_var("OUT_DIR", tmp_dir.path()); + + let lib = ::find_package("harfbuzz").unwrap(); + + check_before(&lib, "freetype", "zlib"); + check_before(&lib, "freetype", "bzip2"); + check_before(&lib, "freetype", "libpng"); + check_before(&lib, "harfbuzz", "freetype"); + check_before(&lib, "harfbuzz", "ragel"); + check_before(&lib, "libpng", "zlib"); + + clean_env(); + + fn check_before(lib: &Library, earlier: &str, later: &str) { + match ( + lib.ports.iter().position(|x| *x == *earlier), + lib.ports.iter().position(|x| *x == *later), + ) { + (Some(earlier_pos), Some(later_pos)) if earlier_pos < later_pos => { + // ok + } + _ => { + println!( + 
"earlier: {}, later: {}\nLibrary found: {:#?}", + earlier, later, lib + ); + panic!(); + } + } + } + } + // #[test] + // fn dynamic_build_package_specific_bailout() { + // clean_env(); + // env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); + // env::set_var("TARGET", "x86_64-pc-windows-msvc"); + // env::set_var("VCPKGRS_DYNAMIC", "1"); + // env::set_var("VCPKGRS_NO_LIBMYSQL", "1"); + + // println!("Result is {:?}", ::find_package("libmysql")); + // assert!(match ::find_package("libmysql") { + // Err(Error::DisabledByEnv(ref v)) if v == "VCPKGRS_NO_LIBMYSQL" => true, + // _ => false, + // }); + // clean_env(); + // } + + // #[test] + // fn dynamic_build_global_bailout() { + // clean_env(); + // env::set_var("VCPKG_ROOT", vcpkg_test_tree_loc("no-status")); + // env::set_var("TARGET", "x86_64-pc-windows-msvc"); + // env::set_var("VCPKGRS_DYNAMIC", "1"); + // env::set_var("VCPKGRS_DISABLE", "1"); + + // println!("Result is {:?}", ::find_package("libmysql")); + // assert!(match ::find_package("libmysql") { + // Err(Error::DisabledByEnv(ref v)) if v == "VCPKGRS_DISABLE" => true, + // _ => false, + // }); + // clean_env(); + // } + + fn clean_env() { + env::remove_var("TARGET"); + env::remove_var("VCPKG_ROOT"); + env::remove_var("VCPKGRS_DYNAMIC"); + env::remove_var("RUSTFLAGS"); + env::remove_var("CARGO_CFG_TARGET_FEATURE"); + env::remove_var("VCPKGRS_DISABLE"); + env::remove_var("VCPKGRS_NO_LIBMYSQL"); + } + + // path to a to vcpkg installation to test against + fn vcpkg_test_tree_loc(name: &str) -> PathBuf { + let mut path = PathBuf::new(); + path.push(env::var("CARGO_MANIFEST_DIR").unwrap()); + path.pop(); + path.push("test-data"); + path.push(name); + path + } +} diff -Nru cargo-0.44.1/vendor/winapi/build.rs cargo-0.47.0/vendor/winapi/build.rs --- cargo-0.44.1/vendor/winapi/build.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/build.rs 2020-10-01 21:38:28.000000000 +0000 @@ -46,19 +46,26 @@ ("hidpi", &["hidusage", "minwindef", "ntdef", "ntstatus", "winnt"], &["hid"]), ("hidsdi", &["guiddef", "hidpi", "minwindef", "winnt"], &["hid"]), ("hidusage", &["minwindef"], &[]), - ("ifdef", &["basetsd", "minwindef"], &[]), + ("ifdef", &["basetsd", "guiddef", "ntdef"], &[]), + ("ifmib", &["ifdef", "ipifcons", "minwindef", "ntdef"], &[]), ("in6addr", &["minwindef"], &[]), ("inaddr", &["minwindef"], &[]), ("intsafe", &[], &[]), + ("ipifcons", &["minwindef"], &[]), + ("ipmib", &["ifdef", "ifmib", "minwindef", "nldef", "ntdef"], &[]), + ("iprtrmib", &["ipmib", "minwindef", "ntdef"], &[]), ("ks", &[], &[]), ("ksmedia", &["minwindef"], &[]), ("ktmtypes", &["guiddef", "minwindef", "winnt"], &[]), ("lmcons", &["minwindef", "winnt"], &[]), ("minwindef", &["basetsd", "ntdef"], &[]), ("mmreg", &["guiddef", "minwindef"], &[]), + ("mprapidef", &[], &[]), ("mstcpip", &["basetsd", "guiddef", "in6addr", "inaddr", "minwindef", "winnt", "ws2def"], &["ntdll"]), ("mswsockdef", &["minwindef", "winnt", "ws2def"], &[]), - ("netioapi", &["basetsd", "guiddef", "ifdef", "minwindef", "ntdef"], &["iphlpapi"]), + ("netioapi", &["basetsd", "guiddef", "ifdef", "ipifcons", "minwindef", "nldef", "ntddndis", "ntdef", "ws2def", "ws2ipdef"], &["iphlpapi"]), + ("nldef", &["basetsd", "minwindef", "ntdef"], &[]), + ("ntddndis", &["ifdef", "minwindef"], &[]), ("ntddscsi", &["basetsd", "minwindef", "ntdef", "winioctl", "winnt"], &[]), ("ntddser", &["devpropdef"], &[]), ("ntdef", &["basetsd", "guiddef"], &[]), @@ -70,15 +77,22 @@ ("sddl", &["basetsd", "minwindef", "winnt"], &["advapi32"]), ("sspi", &["basetsd", 
"guiddef", "minwindef", "subauth", "wincred", "winnt"], &["credui", "secur32"]), ("stralign", &["vcruntime", "winnt"], &["kernel32"]), + ("tcpestats", &["basetsd", "ntdef"], &[]), + ("tcpmib", &["basetsd", "in6addr", "minwindef", "ntdef"], &[]), ("transportsettingcommon", &["guiddef"], &[]), ("tvout", &["guiddef", "minwindef"], &[]), + ("udpmib", &["basetsd", "in6addr", "minwindef", "ntdef"], &[]), ("usb", &["minwindef", "usbspec", "winnt"], &[]), + ("usbioctl", &["basetsd", "guiddef", "minwindef", "ntdef", "usb", "usbiodef", "usbspec", "winioctl"], &[]), ("usbiodef", &["guiddef", "minwindef", "winioctl", "winnt"], &[]), + ("usbscan", &["ntdef", "winioctl"], &[]), ("usbspec", &["basetsd", "guiddef", "minwindef", "winnt"], &[]), ("windef", &["minwindef", "winnt"], &[]), + ("windot11", &["basetsd", "minwindef", "ntddndis", "winnt", "wlantypes"], &[]), ("windowsx", &["minwindef"], &[]), ("winerror", &["minwindef", "wtypesbase"], &[]), ("winusbio", &["minwindef", "usb"], &[]), + ("wlantypes", &["basetsd", "minwindef"], &[]), ("wmistr", &["basetsd", "guiddef", "minwindef", "winnt"], &[]), ("wnnc", &["minwindef"], &[]), ("ws2def", &["basetsd", "guiddef", "inaddr", "minwindef", "vcruntime", "winnt"], &[]), @@ -86,9 +100,11 @@ ("wtypes", &["guiddef", "minwindef", "ntdef", "rpcndr", "wingdi", "wtypesbase"], &[]), ("wtypesbase", &["minwindef", "rpcndr", "winnt"], &[]), // ucrt + ("corecrt", &[], &[]), // um ("accctrl", &["guiddef", "minwindef", "winbase", "winnt"], &[]), ("aclapi", &["accctrl", "guiddef", "minwindef", "winnt"], &["advapi32"]), + ("adhoc", &["guiddef", "minwindef", "unknwnbase", "winnt"], &[]), ("appmgmt", &["guiddef", "minwindef", "winnt"], &["advapi32"]), ("audioclient", &["audiosessiontypes", "basetsd", "guiddef", "minwindef", "mmreg", "strmif", "unknwnbase", "winerror", "winnt", "wtypesbase"], &[]), ("audiosessiontypes", &["minwindef"], &[]), @@ -171,6 +187,7 @@ ("dmusicc", &[], &[]), ("docobj", &["guiddef", "minwindef", "oaidl", "unknwnbase", "winnt"], &[]), ("documenttarget", &["basetsd", "guiddef", "ntdef", "unknwnbase"], &[]), + ("dot1x", &["eaptypes", "guiddef", "l2cmn", "minwindef", "winnt"], &[]), ("dpa_dsa", &["basetsd", "minwindef", "winnt"], &["comctl32"]), ("dpapi", &["minwindef", "wincrypt", "windef", "winnt"], &["crypt32"]), ("dsgetdc", &["guiddef", "minwindef", "ntsecapi", "winnt", "ws2def"], &["netapi32"]), @@ -187,6 +204,7 @@ ("dxgidebug", &["basetsd", "guiddef", "minwindef", "unknwnbase", "winnt"], &["dxgi"]), ("dxva2api", &["basetsd", "d3d9", "d3d9types", "guiddef", "minwindef", "unknwnbase", "windef", "winnt"], &["dxva2"]), ("dxvahd", &["d3d9", "d3d9types", "guiddef", "minwindef", "unknwnbase", "windef", "winnt"], &["dxva2"]), + ("eaptypes", &["guiddef", "minwindef", "winnt"], &[]), ("endpointvolume", &["basetsd", "guiddef", "minwindef", "unknwnbase", "winnt"], &[]), ("errhandlingapi", &["basetsd", "minwindef", "winnt"], &["kernel32"]), ("evntcons", &["basetsd", "evntprov", "evntrace", "guiddef", "minwindef", "winnt"], &["advapi32"]), @@ -198,14 +216,18 @@ ("handleapi", &["minwindef", "winnt"], &["kernel32"]), ("heapapi", &["basetsd", "minwinbase", "minwindef", "winnt"], &["kernel32"]), ("highlevelmonitorconfigurationapi", &["minwindef", "physicalmonitorenumerationapi", "winnt"], &["dxva2"]), - ("http", &["guiddef", "minwinbase", "minwindef", "sspi", "winnt", "ws2def"], &["winhttp"]), + ("http", &["guiddef", "minwinbase", "minwindef", "sspi", "winnt", "ws2def"], &["httpapi"]), ("imm", &["minwindef", "windef"], &["imm32"]), ("interlockedapi", &["minwindef", 
"winnt"], &["kernel32"]), ("ioapiset", &["basetsd", "minwinbase", "minwindef", "winnt"], &["kernel32"]), + ("ipexport", &["basetsd", "in6addr", "ntdef"], &[]), + ("iphlpapi", &["basetsd", "ifdef", "ifmib", "ipexport", "ipmib", "iprtrmib", "iptypes", "minwinbase", "minwindef", "ntdef", "tcpestats", "tcpmib", "udpmib", "ws2def", "ws2ipdef"], &["iphlpapi"]), + ("iptypes", &["basetsd", "corecrt", "guiddef", "ifdef", "ipifcons", "minwindef", "nldef", "ntdef", "ws2def"], &[]), ("jobapi", &["minwindef", "winnt"], &["kernel32"]), ("jobapi2", &["basetsd", "minwinbase", "minwindef", "ntdef", "winnt"], &["kernel32"]), ("knownfolders", &[], &[]), ("ktmw32", &["guiddef", "minwinbase", "minwindef", "winnt"], &["ktmw32"]), + ("l2cmn", &["guiddef", "minwindef", "winnt"], &[]), ("libloaderapi", &["basetsd", "minwindef", "winnt"], &["kernel32", "user32"]), ("lmaccess", &["basetsd", "lmcons", "minwindef", "winnt"], &["netapi32"]), ("lmalert", &["lmcons", "minwindef", "winnt"], &["netapi32"]), @@ -278,6 +300,7 @@ ("restartmanager", &["minwindef", "winnt"], &["rstrtmgr"]), ("restrictederrorinfo", &["unknwnbase", "winnt", "wtypes"], &[]), ("rmxfguid", &[], &[]), + ("rtinfo", &["basetsd"], &[]), ("sapi", &["guiddef", "minwindef", "sapi53", "unknwnbase", "winnt"], &[]), ("sapi51", &["guiddef", "minwindef", "mmreg", "oaidl", "objidlbase", "rpcndr", "servprov", "unknwnbase", "windef", "winnt", "wtypes", "wtypesbase"], &[]), ("sapi53", &["guiddef", "minwindef", "oaidl", "sapi51", "unknwnbase", "urlmon", "winnt", "wtypes"], &[]), @@ -291,9 +314,10 @@ ("shellapi", &["basetsd", "guiddef", "minwinbase", "minwindef", "processthreadsapi", "windef", "winnt", "winuser"], &["shell32", "shlwapi"]), ("shellscalingapi", &["minwindef", "windef", "winnt"], &["shcore"]), ("shlobj", &["guiddef", "minwinbase", "minwindef", "shtypes", "windef", "winnt"], &["shell32"]), - ("shobjidl", &["guiddef", "minwindef", "objidl", "propkeydef", "propsys", "shobjidl_core", "shtypes", "unknwnbase", "windef", "winnt"], &[]), - ("shobjidl_core", &["commctrl", "guiddef", "minwindef", "objidl", "unknwnbase", "windef", "winnt"], &[]), + ("shobjidl", &["guiddef", "minwindef", "propsys", "shobjidl_core", "shtypes", "unknwnbase", "windef", "winnt"], &[]), + ("shobjidl_core", &["commctrl", "guiddef", "minwinbase", "minwindef", "objidl", "propkeydef", "propsys", "shtypes", "unknwnbase", "windef", "winnt"], &[]), ("shtypes", &["guiddef", "minwindef", "winnt"], &[]), + ("softpub", &[], &[]), ("spapidef", &["minwindef", "winnt"], &[]), ("spellcheck", &["minwindef", "ntdef", "objidlbase", "unknwnbase", "winerror"], &[]), ("sporder", &["guiddef", "minwindef"], &["sporder"]), @@ -350,21 +374,27 @@ ("winnetwk", &["basetsd", "minwindef", "windef", "winerror", "winnt"], &["mpr"]), ("winnls", &["basetsd", "guiddef", "minwinbase", "minwindef", "winnt"], &["kernel32"]), ("winnt", &["basetsd", "excpt", "guiddef", "ktmtypes", "minwindef", "ntdef", "vcruntime"], &["kernel32"]), - ("winreg", &["basetsd", "minwinbase", "minwindef", "winnt"], &["advapi32"]), + ("winreg", &["basetsd", "minwinbase", "minwindef", "reason", "winnt"], &["advapi32"]), ("winsafer", &["basetsd", "guiddef", "minwindef", "wincrypt", "windef", "winnt"], &["advapi32"]), ("winscard", &["basetsd", "guiddef", "minwindef", "rpcdce", "windef", "winnt", "winsmcrd"], &["winscard"]), ("winsmcrd", &["minwindef", "winioctl"], &[]), ("winsock2", &["basetsd", "guiddef", "inaddr", "minwinbase", "minwindef", "qos", "winbase", "windef", "winerror", "winnt", "ws2def", "wtypesbase"], &["ws2_32"]), ("winspool", 
&["guiddef", "minwinbase", "minwindef", "vcruntime", "windef", "winerror", "wingdi", "winnt"], &["winspool"]), ("winsvc", &["minwindef", "winnt"], &["advapi32"]), + ("wintrust", &["guiddef", "minwindef", "ntdef", "wincrypt", "windef"], &["wintrust"]), ("winusb", &["minwinbase", "minwindef", "usb", "usbspec", "winnt", "winusbio"], &["winusb"]), ("winuser", &["basetsd", "guiddef", "limits", "minwinbase", "minwindef", "vadefs", "windef", "wingdi", "winnt"], &["user32"]), ("winver", &["minwindef", "winnt"], &["kernel32", "version"]), + ("wlanapi", &["devpropdef", "eaptypes", "guiddef", "l2cmn", "minwindef", "windef", "windot11", "winnt", "wlantypes"], &["wlanapi"]), + ("wlanihv", &["basetsd", "dot1x", "eaptypes", "guiddef", "l2cmn", "minwindef", "windot11", "winnt", "winuser", "wlanihvtypes", "wlantypes", "wlclient"], &[]), + ("wlanihvtypes", &["eaptypes", "guiddef", "minwindef", "winnt", "wlantypes"], &[]), + ("wlclient", &["guiddef", "minwindef", "windot11", "winnt"], &[]), ("wow64apiset", &["minwindef", "winnt"], &["kernel32"]), ("wpdmtpextensions", &["wtypes"], &[]), ("ws2bth", &["bthdef", "bthsdpdef", "guiddef", "minwindef", "winnt", "ws2def"], &[]), ("ws2spi", &["basetsd", "guiddef", "minwindef", "vcruntime", "windef", "winnt", "winsock2", "ws2def", "wtypesbase"], &["ws2_32"]), ("ws2tcpip", &["guiddef", "minwinbase", "minwindef", "mstcpip", "vcruntime", "winerror", "winnt", "winsock2", "ws2def", "wtypesbase"], &["fwpuclnt", "ws2_32"]), + ("wtsapi32", &["minwindef", "ntdef"], &["wtsapi32"]), ("xinput", &["guiddef", "minwindef", "winnt"], &["xinput"]), // vc ("excpt", &[], &[]), @@ -448,7 +478,7 @@ libs.dedup(); // FIXME Temporary hacks until build script is redesigned. libs.retain(|&&lib| match &*var("TARGET").unwrap() { - "aarch64-pc-windows-msvc" | "thumbv7a-pc-windows-msvc" => { + "aarch64-pc-windows-msvc" | "aarch64-uwp-windows-msvc" | "thumbv7a-pc-windows-msvc" => { if lib == "opengl32" { false } else { true } }, diff -Nru cargo-0.44.1/vendor/winapi/.cargo-checksum.json cargo-0.47.0/vendor/winapi/.cargo-checksum.json --- cargo-0.44.1/vendor/winapi/.cargo-checksum.json 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/.cargo-checksum.json 2020-10-01 21:38:28.000000000 +0000 @@ -1 +1 @@ -{"files":{},"package":"8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"} \ No newline at end of file +{"files":{},"package":"5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"} \ No newline at end of file diff -Nru cargo-0.44.1/vendor/winapi/Cargo.toml cargo-0.47.0/vendor/winapi/Cargo.toml --- cargo-0.44.1/vendor/winapi/Cargo.toml 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/Cargo.toml 2020-10-01 21:38:28.000000000 +0000 @@ -12,12 +12,12 @@ [package] name = "winapi" -version = "0.3.8" +version = "0.3.9" authors = ["Peter Atashian "] build = "build.rs" include = ["/src/**/*", "/Cargo.toml", "/LICENSE-MIT", "/LICENSE-APACHE", "/build.rs", "/README.md"] description = "Raw FFI bindings for all of Windows API." 
-documentation = "https://docs.rs/winapi/*/x86_64-pc-windows-msvc/winapi/" +documentation = "https://docs.rs/winapi/" readme = "README.md" keywords = ["windows", "ffi", "win32", "com", "directx"] categories = ["external-ffi-bindings", "no-std", "os::windows-apis"] @@ -26,11 +26,13 @@ [package.metadata.docs.rs] default-target = "x86_64-pc-windows-msvc" features = ["everything", "impl-debug", "impl-default"] +targets = ["aarch64-pc-windows-msvc", "i686-pc-windows-msvc", "x86_64-pc-windows-msvc"] [features] accctrl = [] aclapi = [] activation = [] +adhoc = [] appmgmt = [] audioclient = [] audiosessiontypes = [] @@ -65,6 +67,7 @@ commdlg = [] commoncontrols = [] consoleapi = [] +corecrt = [] corsym = [] d2d1 = [] d2d1_1 = [] @@ -133,6 +136,7 @@ dmusicc = [] docobj = [] documenttarget = [] +dot1x = [] dpa_dsa = [] dpapi = [] dsgetdc = [] @@ -157,6 +161,7 @@ dxgitype = [] dxva2api = [] dxvahd = [] +eaptypes = [] enclaveapi = [] endpointvolume = [] errhandlingapi = [] @@ -181,6 +186,7 @@ hstring = [] http = [] ifdef = [] +ifmib = [] imm = [] impl-debug = [] impl-default = [] @@ -190,6 +196,12 @@ interlockedapi = [] intsafe = [] ioapiset = [] +ipexport = [] +iphlpapi = [] +ipifcons = [] +ipmib = [] +iprtrmib = [] +iptypes = [] jobapi = [] jobapi2 = [] knownfolders = [] @@ -197,6 +209,7 @@ ksmedia = [] ktmtypes = [] ktmw32 = [] +l2cmn = [] libloaderapi = [] limits = [] lmaccess = [] @@ -226,6 +239,7 @@ mmeapi = [] mmreg = [] mmsystem = [] +mprapidef = [] msaatext = [] mscat = [] mschapp = [] @@ -238,6 +252,8 @@ nb30 = [] ncrypt = [] netioapi = [] +nldef = [] +ntddndis = [] ntddscsi = [] ntddser = [] ntdef = [] @@ -287,6 +303,7 @@ rpc = [] rpcdce = [] rpcndr = [] +rtinfo = [] sapi = [] sapi51 = [] sapi53 = [] @@ -304,6 +321,7 @@ shobjidl = [] shobjidl_core = [] shtypes = [] +softpub = [] spapidef = [] spellcheck = [] sporder = [] @@ -321,6 +339,8 @@ sysinfoapi = [] systemtopologyapi = [] taskschd = [] +tcpestats = [] +tcpmib = [] textstor = [] threadpoolapiset = [] threadpoollegacyapiset = [] @@ -329,11 +349,14 @@ tlhelp32 = [] transportsettingcommon = [] tvout = [] +udpmib = [] unknwnbase = [] urlhist = [] urlmon = [] usb = [] +usbioctl = [] usbiodef = [] +usbscan = [] usbspec = [] userenv = [] usp10 = [] @@ -360,6 +383,7 @@ wincred = [] wincrypt = [] windef = [] +windot11 = [] windowsceip = [] windowsx = [] winefs = [] @@ -381,10 +405,16 @@ winspool = [] winstring = [] winsvc = [] +wintrust = [] winusb = [] winusbio = [] winuser = [] winver = [] +wlanapi = [] +wlanihv = [] +wlanihvtypes = [] +wlantypes = [] +wlclient = [] wmistr = [] wnnc = [] wow64apiset = [] @@ -394,6 +424,7 @@ ws2ipdef = [] ws2spi = [] ws2tcpip = [] +wtsapi32 = [] wtypes = [] wtypesbase = [] xinput = [] @@ -401,11 +432,3 @@ version = "0.4" [target.x86_64-pc-windows-gnu.dependencies.winapi-x86_64-pc-windows-gnu] version = "0.4" -[badges.appveyor] -branch = "0.3" -repository = "retep998/winapi-rs" -service = "github" - -[badges.travis-ci] -branch = "0.3" -repository = "retep998/winapi-rs" diff -Nru cargo-0.44.1/vendor/winapi/README.md cargo-0.47.0/vendor/winapi/README.md --- cargo-0.44.1/vendor/winapi/README.md 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/README.md 2020-10-01 21:38:28.000000000 +0000 @@ -1,4 +1,5 @@ # winapi-rs +[![Build status](https://github.com/retep998/winapi-rs/workflows/Rust/badge.svg)](https://github.com/retep998/winapi-rs/actions) [![Build 
status](https://ci.appveyor.com/api/projects/status/i47oonf5e7qm5utq/branch/0.3?svg=true)](https://ci.appveyor.com/project/retep998/winapi-rs/branch/0.3) [![Build Status](https://travis-ci.org/retep998/winapi-rs.svg?branch=0.3)](https://travis-ci.org/retep998/winapi-rs) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/retep998/winapi-rs) @@ -9,13 +10,13 @@ [![License](https://img.shields.io/crates/l/winapi.svg)](https://github.com/retep998/winapi-rs) -[Documentation](https://docs.rs/winapi/*/x86_64-pc-windows-msvc/winapi/) +[Documentation](https://docs.rs/winapi/) -Official IRC channel: #winapi on [Mozilla IRC](https://wiki.mozilla.org/IRC) +Official communication channel: #windows-dev on the [Rust Community Discord](https://discord.gg/aVESxV8) -This crate provides raw FFI bindings to all of Windows API. They are gathered by hand using the Windows 10 SDK from Microsoft. I aim to replace all existing Windows FFI in other crates with this crate through the "[Embrace, extend, and extinguish](http://en.wikipedia.org/wiki/Embrace,_extend_and_extinguish)" technique. +This crate provides raw FFI bindings to all of Windows API. They are gathered by hand using the Windows 10 SDK from Microsoft. I aim to replace all existing Windows FFI in other crates with this crate through the "[Embrace, extend, and extinguish](https://en.wikipedia.org/wiki/Embrace,_extend,_and_extinguish)" technique. -If this crate is missing something you need, feel free to create an issue, open a pull request, or contact me via [other means](http://www.rustaceans.org/retep998). +If this crate is missing something you need, feel free to create an issue, open a pull request, or contact me via [other means](https://www.rustaceans.org/retep998). This crate depends on Rust 1.6 or newer on Windows. On other platforms this crate is a no-op and should compile with Rust 1.2 or newer. @@ -31,7 +32,7 @@ ### How do I know which module an item is defined in? -You can use the search functionality in the [documentation](https://docs.rs/winapi/*/x86_64-pc-windows-msvc/winapi/) to find where items are defined. +You can use the search functionality in the [documentation](https://docs.rs/winapi/) to find where items are defined. ### Why is there no documentation on how to use anything? 
diff -Nru cargo-0.44.1/vendor/winapi/src/lib.rs cargo-0.47.0/vendor/winapi/src/lib.rs --- cargo-0.44.1/vendor/winapi/src/lib.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/lib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,12 +6,12 @@ #![cfg(windows)] #![deny(unused, unused_qualifications)] #![warn(unused_attributes)] -#![allow(bad_style, overflowing_literals, unused_macros)] +#![allow(bad_style, overflowing_literals, unused_macros, deprecated, unused_crate_dependencies)] #![recursion_limit = "2563"] #![no_std] //Uncomment as needed or once minimum Rust version is bumped to 1.18 //#![cfg_attr(feature = "cargo-clippy", warn(clippy::pedantic))] -//#![cfg_attr(feature = "cargo-clippy", allow(clippy::absurd_extreme_comparisons, clippy::cast_lossless, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_ptr_alignment, clippy::cast_sign_loss, clippy::const_static_lifetime, clippy::doc_markdown, clippy::empty_enum, clippy::erasing_op, clippy::excessive_precision, clippy::expl_impl_clone_on_copy, clippy::identity_op, clippy::if_not_else, clippy::many_single_char_names, clippy::module_inception, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::transmute_int_to_float, clippy::trivially_copy_pass_by_ref, clippy::unreadable_literal, clippy::unseparated_literal_suffix, clippy::used_underscore_binding))] +//#![cfg_attr(feature = "cargo-clippy", allow(clippy::absurd_extreme_comparisons, clippy::cast_lossless, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_ptr_alignment, clippy::cast_sign_loss, clippy::doc_markdown, clippy::empty_enum, clippy::erasing_op, clippy::excessive_precision, clippy::expl_impl_clone_on_copy, clippy::identity_op, clippy::if_not_else, clippy::many_single_char_names, clippy::module_inception, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::transmute_int_to_float, clippy::trivially_copy_pass_by_ref, clippy::unreadable_literal, clippy::unseparated_literal_suffix, clippy::used_underscore_binding, clippy::redundant_static_lifetimes, clippy::missing_safety_doc))] #[cfg(feature = "std")] extern crate std; @@ -25,6 +25,7 @@ mod macros; pub mod km; pub mod shared; +pub mod ucrt; pub mod um; pub mod vc; pub mod winrt; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/d3dukmdt.rs cargo-0.47.0/vendor/winapi/src/shared/d3dukmdt.rs --- cargo-0.44.1/vendor/winapi/src/shared/d3dukmdt.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/d3dukmdt.rs 2020-10-01 21:38:28.000000000 +0000 @@ -254,9 +254,6 @@ STRUCT!{struct D3DDDICB_LOCK2FLAGS { Value: UINT, }} -BITFIELD!{D3DDDICB_LOCK2FLAGS Value: UINT [ - Reserved set_Reserved[0..32], -]} STRUCT!{struct D3DDDICB_DESTROYALLOCATION2FLAGS { Value: UINT, }} diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ifdef.rs cargo-0.47.0/vendor/winapi/src/shared/ifdef.rs --- cargo-0.44.1/vendor/winapi/src/shared/ifdef.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ifdef.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,8 +3,68 @@ // , at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. 
-use shared::basetsd::{UINT16, ULONG64}; -use shared::minwindef::ULONG; +use shared::basetsd::{UINT16, UINT32, ULONG32, ULONG64}; +use shared::guiddef::GUID; +use shared::ntdef::{BOOLEAN, UCHAR, ULONG, USHORT, WCHAR}; +pub type NET_IF_COMPARTMENT_ID = UINT32; +pub type PNET_IF_COMPARTMENT_ID = *mut NET_IF_COMPARTMENT_ID; +pub const NET_IF_COMPARTMENT_ID_UNSPECIFIED: NET_IF_COMPARTMENT_ID = 0; +pub const NET_IF_COMPARTMENT_ID_PRIMARY: NET_IF_COMPARTMENT_ID = 1; +pub type NET_IF_NETWORK_GUID = GUID; +pub type PNET_IF_NETWORK_GUID = *mut NET_IF_NETWORK_GUID; +ENUM!{enum NET_IF_OPER_STATUS { + NET_IF_OPER_STATUS_UP = 1, + NET_IF_OPER_STATUS_DOWN = 2, + NET_IF_OPER_STATUS_TESTING = 3, + NET_IF_OPER_STATUS_UNKNOWN = 4, + NET_IF_OPER_STATUS_DORMANT = 5, + NET_IF_OPER_STATUS_NOT_PRESENT = 6, + NET_IF_OPER_STATUS_LOWER_LAYER_DOWN = 7, +}} +pub type PNET_IF_OPER_STATUS = *mut NET_IF_OPER_STATUS; +pub type NET_IF_OBJECT_ID = ULONG32; +pub type PNET_IF_OBJECT_ID = *mut NET_IF_OBJECT_ID; +ENUM!{enum NET_IF_ADMIN_STATUS { + NET_IF_ADMIN_STATUS_UP = 1, + NET_IF_ADMIN_STATUS_DOWN = 2, + NET_IF_ADMIN_STATUS_TESTING = 3, +}} +pub type PNET_IF_ADMIN_STATUS = *mut NET_IF_ADMIN_STATUS; +pub type NET_IF_COMPARTMENT_SCOPE = UINT32; +pub type PNET_IF_COMPARTMENT_SCOPE = *mut NET_IF_COMPARTMENT_SCOPE; +pub const NET_IF_COMPARTMENT_SCOPE_UNSPECIFIED: NET_IF_COMPARTMENT_SCOPE = 0; +pub const NET_IF_COMPARTMENT_SCOPE_ALL: NET_IF_COMPARTMENT_SCOPE = -1i32 as u32; +ENUM!{enum NET_IF_RCV_ADDRESS_TYPE { + NET_IF_RCV_ADDRESS_TYPE_OTHER = 1, + NET_IF_RCV_ADDRESS_TYPE_VOLATILE = 2, + NET_IF_RCV_ADDRESS_TYPE_NON_VOLATILE = 3, +}} +pub type PNET_IF_RCV_ADDRESS_TYPE = *mut NET_IF_RCV_ADDRESS_TYPE; +STRUCT!{struct NET_IF_RCV_ADDRESS_LH { + ifRcvAddressType: NET_IF_RCV_ADDRESS_TYPE, + ifRcvAddressLength: USHORT, + ifRcvAddressOffset: USHORT, +}} +pub type PNET_IF_RCV_ADDRESS_LH = *mut NET_IF_RCV_ADDRESS_LH; +STRUCT!{struct NET_IF_ALIAS_LH { + ifAliasLength: USHORT, + ifAliasOffset: USHORT, +}} +pub type PNET_IF_ALIAS_LH = *mut NET_IF_ALIAS_LH; +// FIXME: Switch to union version in 0.4 +// STRUCT!{struct NET_LUID_LH_Info { +// bitfield: ULONG64, +// }} +// BITFIELD!{NET_LUID_LH_Info bitfield: ULONG64 [ +// Reserved set_Reserved[0..24], +// NetLuidIndex set_NetLuidIndex[24..48], +// IfType set_IfType[48..64], +// ]} +// UNION!{struct NET_LUID_LH { +// [u64; 1], +// Value Value_mut: ULONG64, +// Info Info_mut: NET_LUID_LH_Info, +// }} STRUCT!{struct NET_LUID_LH { Value: ULONG64, }} @@ -14,9 +74,135 @@ IfType set_IfType[48..64], ]} pub type PNET_LUID_LH = *mut NET_LUID_LH; +pub type NET_IF_RCV_ADDRESS = NET_IF_RCV_ADDRESS_LH; +pub type PNET_IF_RCV_ADDRESS = *mut NET_IF_RCV_ADDRESS; +pub type NET_IF_ALIAS = NET_IF_ALIAS_LH; +pub type PNET_IF_ALIAS = *mut NET_IF_ALIAS; pub type NET_LUID = NET_LUID_LH; pub type PNET_LUID = *mut NET_LUID; +pub type IF_LUID = NET_LUID; +pub type PIF_LUID = *mut NET_LUID; pub type NET_IFINDEX = ULONG; -pub type PNET_IFINDEX = *mut ULONG; +pub type PNET_IFINDEX = *mut NET_IFINDEX; pub type NET_IFTYPE = UINT16; -pub type PNET_IFTYPE = *mut UINT16; +pub type PNET_IFTYPE = *mut NET_IFTYPE; +pub type IF_INDEX = NET_IFINDEX; +pub type PIF_INDEX = *mut NET_IFINDEX; +ENUM!{enum NET_IF_CONNECTION_TYPE { + NET_IF_CONNECTION_DEDICATED = 1, + NET_IF_CONNECTION_PASSIVE = 2, + NET_IF_CONNECTION_DEMAND = 3, + NET_IF_CONNECTION_MAXIMUM = 4, +}} +pub type PNET_IF_CONNECTION_TYPE = *mut NET_IF_CONNECTION_TYPE; +ENUM!{enum TUNNEL_TYPE { + TUNNEL_TYPE_NONE = 0, + TUNNEL_TYPE_OTHER = 1, + TUNNEL_TYPE_DIRECT = 2, + 
TUNNEL_TYPE_6TO4 = 11, + TUNNEL_TYPE_ISATAP = 13, + TUNNEL_TYPE_TEREDO = 14, + TUNNEL_TYPE_IPHTTPS = 15, +}} +pub type PTUNNEL_TYPE = *mut TUNNEL_TYPE; +ENUM!{enum NET_IF_ACCESS_TYPE { + NET_IF_ACCESS_LOOPBACK = 1, + NET_IF_ACCESS_BROADCAST = 2, + NET_IF_ACCESS_POINT_TO_POINT = 3, + NET_IF_ACCESS_POINT_TO_MULTI_POINT = 4, + NET_IF_ACCESS_MAXIMUM = 5, +}} +pub type PNET_IF_ACCESS_TYPE = *mut NET_IF_ACCESS_TYPE; +ENUM!{enum NET_IF_DIRECTION_TYPE { + NET_IF_DIRECTION_SENDRECEIVE, + NET_IF_DIRECTION_SENDONLY, + NET_IF_DIRECTION_RECEIVEONLY, + NET_IF_DIRECTION_MAXIMUM, +}} +pub type PNET_IF_DIRECTION_TYPE = *mut NET_IF_DIRECTION_TYPE; +ENUM!{enum NET_IF_MEDIA_CONNECT_STATE { + MediaConnectStateUnknown, + MediaConnectStateConnected, + MediaConnectStateDisconnected, +}} +pub type PNET_IF_MEDIA_CONNECT_STATE = *mut NET_IF_MEDIA_CONNECT_STATE; +ENUM!{enum NET_IF_MEDIA_DUPLEX_STATE { + MediaDuplexStateUnknown = 0, + MediaDuplexStateHalf = 1, + MediaDuplexStateFull = 2, +}} +pub type PNET_IF_MEDIA_DUPLEX_STATE = *mut NET_IF_MEDIA_DUPLEX_STATE; +STRUCT!{struct NET_PHYSICAL_LOCATION_LH { + BusNumber: ULONG, + SlotNumber: ULONG, + FunctionNumber: ULONG, +}} +pub type PNET_PHYSICAL_LOCATION_LH = *mut NET_PHYSICAL_LOCATION_LH; +pub const IF_MAX_STRING_SIZE: usize = 256; +pub const IF_MAX_PHYS_ADDRESS_LENGTH: usize = 32; +STRUCT!{struct IF_COUNTED_STRING_LH { + Length: USHORT, + String: [WCHAR; IF_MAX_STRING_SIZE + 1], +}} +pub type PIF_COUNTED_STRING_LH = *mut IF_COUNTED_STRING_LH; +STRUCT!{struct IF_PHYSICAL_ADDRESS_LH { + Length: USHORT, + Address: [UCHAR; IF_MAX_PHYS_ADDRESS_LENGTH], +}} +pub type PIF_PHYSICAL_ADDRESS_LH = *mut IF_PHYSICAL_ADDRESS_LH; +pub type NET_PHYSICAL_LOCATION = NET_PHYSICAL_LOCATION_LH; +pub type PNET_PHYSICAL_LOCATION = *mut NET_PHYSICAL_LOCATION; +pub type IF_COUNTED_STRING = IF_COUNTED_STRING_LH; +pub type PIF_COUNTED_STRING = *mut IF_COUNTED_STRING; +pub type IF_PHYSICAL_ADDRESS = IF_PHYSICAL_ADDRESS_LH; +pub type PIF_PHYSICAL_ADDRESS = *mut IF_PHYSICAL_ADDRESS; +ENUM!{enum IF_ADMINISTRATIVE_STATE { + IF_ADMINISTRATIVE_DISABLED = 0, + IF_ADMINISTRATIVE_ENABLED = 1, + IF_ADMINISTRATIVE_DEMANDDIAL = 2, +}} +pub type PIF_ADMINISTRATIVE_STATE = *mut IF_ADMINISTRATIVE_STATE; +ENUM!{enum IF_OPER_STATUS { + IfOperStatusUp = 1, + IfOperStatusDown, + IfOperStatusTesting, + IfOperStatusUnknown, + IfOperStatusDormant, + IfOperStatusNotPresent, + IfOperStatusLowerLayerDown, +}} +STRUCT!{struct NDIS_INTERFACE_INFORMATION { + ifOperStatus: NET_IF_OPER_STATUS, + ifOperStatusFlags: ULONG, + MediaConnectState: NET_IF_MEDIA_CONNECT_STATE, + MediaDuplexState: NET_IF_MEDIA_DUPLEX_STATE, + ifMtu: ULONG, + ifPromiscuousMode: BOOLEAN, + ifDeviceWakeUpEnable: BOOLEAN, + XmitLinkSpeed: ULONG64, + RcvLinkSpeed: ULONG64, + ifLastChange: ULONG64, + ifCounterDiscontinuityTime: ULONG64, + ifInUnknownProtos: ULONG64, + ifInDiscards: ULONG64, + ifInErrors: ULONG64, + ifHCInOctets: ULONG64, + ifHCInUcastPkts: ULONG64, + ifHCInMulticastPkts: ULONG64, + ifHCInBroadcastPkts: ULONG64, + ifHCOutOctets: ULONG64, + ifHCOutUcastPkts: ULONG64, + ifHCOutMulticastPkts: ULONG64, + ifHCOutBroadcastPkts: ULONG64, + ifOutErrors: ULONG64, + ifOutDiscards: ULONG64, + ifHCInUcastOctets: ULONG64, + ifHCInMulticastOctets: ULONG64, + ifHCInBroadcastOctets: ULONG64, + ifHCOutUcastOctets: ULONG64, + ifHCOutMulticastOctets: ULONG64, + ifHCOutBroadcastOctets: ULONG64, + CompartmentId: NET_IF_COMPARTMENT_ID, + SupportedStatistics: ULONG, +}} +pub type PNDIS_INTERFACE_INFORMATION = *mut NDIS_INTERFACE_INFORMATION; diff -Nru 
cargo-0.44.1/vendor/winapi/src/shared/ifmib.rs cargo-0.47.0/vendor/winapi/src/shared/ifmib.rs --- cargo-0.44.1/vendor/winapi/src/shared/ifmib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ifmib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,53 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Contains the public definitions and structures for the non-TCP/IP specific parts of MIB-II. +// #include +// #include +use shared::ifdef::IF_INDEX; +use shared::ipifcons::{IFTYPE, INTERNAL_IF_OPER_STATUS}; +use shared::minwindef::DWORD; +use shared::ntdef::{UCHAR, WCHAR}; +const ANY_SIZE: usize = 1; +STRUCT!{struct MIB_IFNUMBER { + dwValue: DWORD, +}} +pub type PMIB_IFNUMBER = *mut MIB_IFNUMBER; +pub const MAXLEN_PHYSADDR: usize = 8; +pub const MAXLEN_IFDESCR: usize = 256; +pub const MAX_INTERFACE_NAME_LEN: usize = 256; +STRUCT!{struct MIB_IFROW { + wszName: [WCHAR; MAX_INTERFACE_NAME_LEN], + dwIndex: IF_INDEX, + dwType: IFTYPE, + dwMtu: DWORD, + dwSpeed: DWORD, + dwPhysAddrLen: DWORD, + bPhysAddr: [UCHAR; MAXLEN_PHYSADDR], + dwAdminStatus: DWORD, + dwOperStatus: INTERNAL_IF_OPER_STATUS, + dwLastChange: DWORD, + dwInOctets: DWORD, + dwInUcastPkts: DWORD, + dwInNUcastPkts: DWORD, + dwInDiscards: DWORD, + dwInErrors: DWORD, + dwInUnknownProtos: DWORD, + dwOutOctets: DWORD, + dwOutUcastPkts: DWORD, + dwOutNUcastPkts: DWORD, + dwOutDiscards: DWORD, + dwOutErrors: DWORD, + dwOutQLen: DWORD, + dwDescrLen: DWORD, + bDescr: [UCHAR; MAXLEN_IFDESCR], +}} +pub type PMIB_IFROW = *mut MIB_IFROW; +STRUCT!{struct MIB_IFTABLE { + dwNumEntries: DWORD, + table: [MIB_IFROW; ANY_SIZE], +}} +pub type PMIB_IFTABLE = *mut MIB_IFTABLE; +// FIXME: SIZEOF_IFTABLE(x) diff -Nru cargo-0.44.1/vendor/winapi/src/shared/in6addr.rs cargo-0.47.0/vendor/winapi/src/shared/in6addr.rs --- cargo-0.44.1/vendor/winapi/src/shared/in6addr.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/in6addr.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,6 +3,7 @@ // , at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. +//! IPv6 Internet address, 'on-wire' format structure. use shared::minwindef::{UCHAR, USHORT}; UNION!{union in6_addr_u { [u16; 8], diff -Nru cargo-0.44.1/vendor/winapi/src/shared/inaddr.rs cargo-0.47.0/vendor/winapi/src/shared/inaddr.rs --- cargo-0.44.1/vendor/winapi/src/shared/inaddr.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/inaddr.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ // , at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. -//! IPv4 Internet address +//! IPv4 Internet address, 'on-wire' format structure. use shared::minwindef::{UCHAR, ULONG, USHORT}; STRUCT!{struct in_addr_S_un_b { s_b1: UCHAR, diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ipifcons.rs cargo-0.47.0/vendor/winapi/src/shared/ipifcons.rs --- cargo-0.44.1/vendor/winapi/src/shared/ipifcons.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ipifcons.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,244 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. 
+// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Constants needed for the Interface Object +// #include +use shared::minwindef::ULONG; +pub type IFTYPE = ULONG; +pub const MIN_IF_TYPE: IFTYPE = 1; +pub const IF_TYPE_OTHER: IFTYPE = 1; +pub const IF_TYPE_REGULAR_1822: IFTYPE = 2; +pub const IF_TYPE_HDH_1822: IFTYPE = 3; +pub const IF_TYPE_DDN_X25: IFTYPE = 4; +pub const IF_TYPE_RFC877_X25: IFTYPE = 5; +pub const IF_TYPE_ETHERNET_CSMACD: IFTYPE = 6; +pub const IF_TYPE_IS088023_CSMACD: IFTYPE = 7; +pub const IF_TYPE_ISO88024_TOKENBUS: IFTYPE = 8; +pub const IF_TYPE_ISO88025_TOKENRING: IFTYPE = 9; +pub const IF_TYPE_ISO88026_MAN: IFTYPE = 10; +pub const IF_TYPE_STARLAN: IFTYPE = 11; +pub const IF_TYPE_PROTEON_10MBIT: IFTYPE = 12; +pub const IF_TYPE_PROTEON_80MBIT: IFTYPE = 13; +pub const IF_TYPE_HYPERCHANNEL: IFTYPE = 14; +pub const IF_TYPE_FDDI: IFTYPE = 15; +pub const IF_TYPE_LAP_B: IFTYPE = 16; +pub const IF_TYPE_SDLC: IFTYPE = 17; +pub const IF_TYPE_DS1: IFTYPE = 18; +pub const IF_TYPE_E1: IFTYPE = 19; +pub const IF_TYPE_BASIC_ISDN: IFTYPE = 20; +pub const IF_TYPE_PRIMARY_ISDN: IFTYPE = 21; +pub const IF_TYPE_PROP_POINT2POINT_SERIAL: IFTYPE = 22; +pub const IF_TYPE_PPP: IFTYPE = 23; +pub const IF_TYPE_SOFTWARE_LOOPBACK: IFTYPE = 24; +pub const IF_TYPE_EON: IFTYPE = 25; +pub const IF_TYPE_ETHERNET_3MBIT: IFTYPE = 26; +pub const IF_TYPE_NSIP: IFTYPE = 27; +pub const IF_TYPE_SLIP: IFTYPE = 28; +pub const IF_TYPE_ULTRA: IFTYPE = 29; +pub const IF_TYPE_DS3: IFTYPE = 30; +pub const IF_TYPE_SIP: IFTYPE = 31; +pub const IF_TYPE_FRAMERELAY: IFTYPE = 32; +pub const IF_TYPE_RS232: IFTYPE = 33; +pub const IF_TYPE_PARA: IFTYPE = 34; +pub const IF_TYPE_ARCNET: IFTYPE = 35; +pub const IF_TYPE_ARCNET_PLUS: IFTYPE = 36; +pub const IF_TYPE_ATM: IFTYPE = 37; +pub const IF_TYPE_MIO_X25: IFTYPE = 38; +pub const IF_TYPE_SONET: IFTYPE = 39; +pub const IF_TYPE_X25_PLE: IFTYPE = 40; +pub const IF_TYPE_ISO88022_LLC: IFTYPE = 41; +pub const IF_TYPE_LOCALTALK: IFTYPE = 42; +pub const IF_TYPE_SMDS_DXI: IFTYPE = 43; +pub const IF_TYPE_FRAMERELAY_SERVICE: IFTYPE = 44; +pub const IF_TYPE_V35: IFTYPE = 45; +pub const IF_TYPE_HSSI: IFTYPE = 46; +pub const IF_TYPE_HIPPI: IFTYPE = 47; +pub const IF_TYPE_MODEM: IFTYPE = 48; +pub const IF_TYPE_AAL5: IFTYPE = 49; +pub const IF_TYPE_SONET_PATH: IFTYPE = 50; +pub const IF_TYPE_SONET_VT: IFTYPE = 51; +pub const IF_TYPE_SMDS_ICIP: IFTYPE = 52; +pub const IF_TYPE_PROP_VIRTUAL: IFTYPE = 53; +pub const IF_TYPE_PROP_MULTIPLEXOR: IFTYPE = 54; +pub const IF_TYPE_IEEE80212: IFTYPE = 55; +pub const IF_TYPE_FIBRECHANNEL: IFTYPE = 56; +pub const IF_TYPE_HIPPIINTERFACE: IFTYPE = 57; +pub const IF_TYPE_FRAMERELAY_INTERCONNECT: IFTYPE = 58; +pub const IF_TYPE_AFLANE_8023: IFTYPE = 59; +pub const IF_TYPE_AFLANE_8025: IFTYPE = 60; +pub const IF_TYPE_CCTEMUL: IFTYPE = 61; +pub const IF_TYPE_FASTETHER: IFTYPE = 62; +pub const IF_TYPE_ISDN: IFTYPE = 63; +pub const IF_TYPE_V11: IFTYPE = 64; +pub const IF_TYPE_V36: IFTYPE = 65; +pub const IF_TYPE_G703_64K: IFTYPE = 66; +pub const IF_TYPE_G703_2MB: IFTYPE = 67; +pub const IF_TYPE_QLLC: IFTYPE = 68; +pub const IF_TYPE_FASTETHER_FX: IFTYPE = 69; +pub const IF_TYPE_CHANNEL: IFTYPE = 70; +pub const IF_TYPE_IEEE80211: IFTYPE = 71; +pub const IF_TYPE_IBM370PARCHAN: IFTYPE = 72; +pub const IF_TYPE_ESCON: IFTYPE = 73; +pub const IF_TYPE_DLSW: IFTYPE = 74; +pub const IF_TYPE_ISDN_S: IFTYPE = 75; +pub const IF_TYPE_ISDN_U: IFTYPE = 76; +pub const IF_TYPE_LAP_D: IFTYPE = 77; 
+pub const IF_TYPE_IPSWITCH: IFTYPE = 78; +pub const IF_TYPE_RSRB: IFTYPE = 79; +pub const IF_TYPE_ATM_LOGICAL: IFTYPE = 80; +pub const IF_TYPE_DS0: IFTYPE = 81; +pub const IF_TYPE_DS0_BUNDLE: IFTYPE = 82; +pub const IF_TYPE_BSC: IFTYPE = 83; +pub const IF_TYPE_ASYNC: IFTYPE = 84; +pub const IF_TYPE_CNR: IFTYPE = 85; +pub const IF_TYPE_ISO88025R_DTR: IFTYPE = 86; +pub const IF_TYPE_EPLRS: IFTYPE = 87; +pub const IF_TYPE_ARAP: IFTYPE = 88; +pub const IF_TYPE_PROP_CNLS: IFTYPE = 89; +pub const IF_TYPE_HOSTPAD: IFTYPE = 90; +pub const IF_TYPE_TERMPAD: IFTYPE = 91; +pub const IF_TYPE_FRAMERELAY_MPI: IFTYPE = 92; +pub const IF_TYPE_X213: IFTYPE = 93; +pub const IF_TYPE_ADSL: IFTYPE = 94; +pub const IF_TYPE_RADSL: IFTYPE = 95; +pub const IF_TYPE_SDSL: IFTYPE = 96; +pub const IF_TYPE_VDSL: IFTYPE = 97; +pub const IF_TYPE_ISO88025_CRFPRINT: IFTYPE = 98; +pub const IF_TYPE_MYRINET: IFTYPE = 99; +pub const IF_TYPE_VOICE_EM: IFTYPE = 100; +pub const IF_TYPE_VOICE_FXO: IFTYPE = 101; +pub const IF_TYPE_VOICE_FXS: IFTYPE = 102; +pub const IF_TYPE_VOICE_ENCAP: IFTYPE = 103; +pub const IF_TYPE_VOICE_OVERIP: IFTYPE = 104; +pub const IF_TYPE_ATM_DXI: IFTYPE = 105; +pub const IF_TYPE_ATM_FUNI: IFTYPE = 106; +pub const IF_TYPE_ATM_IMA: IFTYPE = 107; +pub const IF_TYPE_PPPMULTILINKBUNDLE: IFTYPE = 108; +pub const IF_TYPE_IPOVER_CDLC: IFTYPE = 109; +pub const IF_TYPE_IPOVER_CLAW: IFTYPE = 110; +pub const IF_TYPE_STACKTOSTACK: IFTYPE = 111; +pub const IF_TYPE_VIRTUALIPADDRESS: IFTYPE = 112; +pub const IF_TYPE_MPC: IFTYPE = 113; +pub const IF_TYPE_IPOVER_ATM: IFTYPE = 114; +pub const IF_TYPE_ISO88025_FIBER: IFTYPE = 115; +pub const IF_TYPE_TDLC: IFTYPE = 116; +pub const IF_TYPE_GIGABITETHERNET: IFTYPE = 117; +pub const IF_TYPE_HDLC: IFTYPE = 118; +pub const IF_TYPE_LAP_F: IFTYPE = 119; +pub const IF_TYPE_V37: IFTYPE = 120; +pub const IF_TYPE_X25_MLP: IFTYPE = 121; +pub const IF_TYPE_X25_HUNTGROUP: IFTYPE = 122; +pub const IF_TYPE_TRANSPHDLC: IFTYPE = 123; +pub const IF_TYPE_INTERLEAVE: IFTYPE = 124; +pub const IF_TYPE_FAST: IFTYPE = 125; +pub const IF_TYPE_IP: IFTYPE = 126; +pub const IF_TYPE_DOCSCABLE_MACLAYER: IFTYPE = 127; +pub const IF_TYPE_DOCSCABLE_DOWNSTREAM: IFTYPE = 128; +pub const IF_TYPE_DOCSCABLE_UPSTREAM: IFTYPE = 129; +pub const IF_TYPE_A12MPPSWITCH: IFTYPE = 130; +pub const IF_TYPE_TUNNEL: IFTYPE = 131; +pub const IF_TYPE_COFFEE: IFTYPE = 132; +pub const IF_TYPE_CES: IFTYPE = 133; +pub const IF_TYPE_ATM_SUBINTERFACE: IFTYPE = 134; +pub const IF_TYPE_L2_VLAN: IFTYPE = 135; +pub const IF_TYPE_L3_IPVLAN: IFTYPE = 136; +pub const IF_TYPE_L3_IPXVLAN: IFTYPE = 137; +pub const IF_TYPE_DIGITALPOWERLINE: IFTYPE = 138; +pub const IF_TYPE_MEDIAMAILOVERIP: IFTYPE = 139; +pub const IF_TYPE_DTM: IFTYPE = 140; +pub const IF_TYPE_DCN: IFTYPE = 141; +pub const IF_TYPE_IPFORWARD: IFTYPE = 142; +pub const IF_TYPE_MSDSL: IFTYPE = 143; +pub const IF_TYPE_IEEE1394: IFTYPE = 144; +pub const IF_TYPE_IF_GSN: IFTYPE = 145; +pub const IF_TYPE_DVBRCC_MACLAYER: IFTYPE = 146; +pub const IF_TYPE_DVBRCC_DOWNSTREAM: IFTYPE = 147; +pub const IF_TYPE_DVBRCC_UPSTREAM: IFTYPE = 148; +pub const IF_TYPE_ATM_VIRTUAL: IFTYPE = 149; +pub const IF_TYPE_MPLS_TUNNEL: IFTYPE = 150; +pub const IF_TYPE_SRP: IFTYPE = 151; +pub const IF_TYPE_VOICEOVERATM: IFTYPE = 152; +pub const IF_TYPE_VOICEOVERFRAMERELAY: IFTYPE = 153; +pub const IF_TYPE_IDSL: IFTYPE = 154; +pub const IF_TYPE_COMPOSITELINK: IFTYPE = 155; +pub const IF_TYPE_SS7_SIGLINK: IFTYPE = 156; +pub const IF_TYPE_PROP_WIRELESS_P2P: IFTYPE = 157; +pub const IF_TYPE_FR_FORWARD: IFTYPE = 158; 
+pub const IF_TYPE_RFC1483: IFTYPE = 159; +pub const IF_TYPE_USB: IFTYPE = 160; +pub const IF_TYPE_IEEE8023AD_LAG: IFTYPE = 161; +pub const IF_TYPE_BGP_POLICY_ACCOUNTING: IFTYPE = 162; +pub const IF_TYPE_FRF16_MFR_BUNDLE: IFTYPE = 163; +pub const IF_TYPE_H323_GATEKEEPER: IFTYPE = 164; +pub const IF_TYPE_H323_PROXY: IFTYPE = 165; +pub const IF_TYPE_MPLS: IFTYPE = 166; +pub const IF_TYPE_MF_SIGLINK: IFTYPE = 167; +pub const IF_TYPE_HDSL2: IFTYPE = 168; +pub const IF_TYPE_SHDSL: IFTYPE = 169; +pub const IF_TYPE_DS1_FDL: IFTYPE = 170; +pub const IF_TYPE_POS: IFTYPE = 171; +pub const IF_TYPE_DVB_ASI_IN: IFTYPE = 172; +pub const IF_TYPE_DVB_ASI_OUT: IFTYPE = 173; +pub const IF_TYPE_PLC: IFTYPE = 174; +pub const IF_TYPE_NFAS: IFTYPE = 175; +pub const IF_TYPE_TR008: IFTYPE = 176; +pub const IF_TYPE_GR303_RDT: IFTYPE = 177; +pub const IF_TYPE_GR303_IDT: IFTYPE = 178; +pub const IF_TYPE_ISUP: IFTYPE = 179; +pub const IF_TYPE_PROP_DOCS_WIRELESS_MACLAYER: IFTYPE = 180; +pub const IF_TYPE_PROP_DOCS_WIRELESS_DOWNSTREAM: IFTYPE = 181; +pub const IF_TYPE_PROP_DOCS_WIRELESS_UPSTREAM: IFTYPE = 182; +pub const IF_TYPE_HIPERLAN2: IFTYPE = 183; +pub const IF_TYPE_PROP_BWA_P2MP: IFTYPE = 184; +pub const IF_TYPE_SONET_OVERHEAD_CHANNEL: IFTYPE = 185; +pub const IF_TYPE_DIGITAL_WRAPPER_OVERHEAD_CHANNEL: IFTYPE = 186; +pub const IF_TYPE_AAL2: IFTYPE = 187; +pub const IF_TYPE_RADIO_MAC: IFTYPE = 188; +pub const IF_TYPE_ATM_RADIO: IFTYPE = 189; +pub const IF_TYPE_IMT: IFTYPE = 190; +pub const IF_TYPE_MVL: IFTYPE = 191; +pub const IF_TYPE_REACH_DSL: IFTYPE = 192; +pub const IF_TYPE_FR_DLCI_ENDPT: IFTYPE = 193; +pub const IF_TYPE_ATM_VCI_ENDPT: IFTYPE = 194; +pub const IF_TYPE_OPTICAL_CHANNEL: IFTYPE = 195; +pub const IF_TYPE_OPTICAL_TRANSPORT: IFTYPE = 196; +pub const IF_TYPE_IEEE80216_WMAN: IFTYPE = 237; +pub const IF_TYPE_WWANPP: IFTYPE = 243; +pub const IF_TYPE_WWANPP2: IFTYPE = 244; +pub const IF_TYPE_IEEE802154: IFTYPE = 259; +pub const IF_TYPE_XBOX_WIRELESS: IFTYPE = 281; +pub const MAX_IF_TYPE: IFTYPE = 281; +ENUM!{enum IF_ACCESS_TYPE { + IF_ACCESS_LOOPBACK = 1, + IF_ACCESS_BROADCAST = 2, + IF_ACCESS_POINT_TO_POINT = 3, + IF_ACCESS_POINTTOPOINT = 3, + IF_ACCESS_POINT_TO_MULTI_POINT = 4, + IF_ACCESS_POINTTOMULTIPOINT = 4, +}} +ENUM!{enum INTERNAL_IF_OPER_STATUS { + IF_OPER_STATUS_NON_OPERATIONAL = 0, + IF_OPER_STATUS_UNREACHABLE = 1, + IF_OPER_STATUS_DISCONNECTED = 2, + IF_OPER_STATUS_CONNECTING = 3, + IF_OPER_STATUS_CONNECTED = 4, + IF_OPER_STATUS_OPERATIONAL = 5, +}} +pub const MIB_IF_TYPE_OTHER: IFTYPE = 1; +pub const MIB_IF_TYPE_ETHERNET: IFTYPE = 6; +pub const MIB_IF_TYPE_TOKENRING: IFTYPE = 9; +pub const MIB_IF_TYPE_FDDI: IFTYPE = 15; +pub const MIB_IF_TYPE_PPP: IFTYPE = 23; +pub const MIB_IF_TYPE_LOOPBACK: IFTYPE = 24; +pub const MIB_IF_TYPE_SLIP: IFTYPE = 28; +pub const MIB_IF_ADMIN_STATUS_UP: IFTYPE = 1; +pub const MIB_IF_ADMIN_STATUS_DOWN: IFTYPE = 2; +pub const MIB_IF_ADMIN_STATUS_TESTING: IFTYPE = 3; +pub const MIB_IF_OPER_STATUS_NON_OPERATIONAL: INTERNAL_IF_OPER_STATUS = 0; +pub const MIB_IF_OPER_STATUS_UNREACHABLE: INTERNAL_IF_OPER_STATUS = 1; +pub const MIB_IF_OPER_STATUS_DISCONNECTED: INTERNAL_IF_OPER_STATUS = 2; +pub const MIB_IF_OPER_STATUS_CONNECTING: INTERNAL_IF_OPER_STATUS = 3; +pub const MIB_IF_OPER_STATUS_CONNECTED: INTERNAL_IF_OPER_STATUS = 4; +pub const MIB_IF_OPER_STATUS_OPERATIONAL: INTERNAL_IF_OPER_STATUS = 5; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ipmib.rs cargo-0.47.0/vendor/winapi/src/shared/ipmib.rs --- cargo-0.44.1/vendor/winapi/src/shared/ipmib.rs 1970-01-01 
00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ipmib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,381 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::ifdef::IF_INDEX; +use shared::ifmib::MAXLEN_PHYSADDR; +use shared::minwindef::DWORD; +use shared::nldef::NL_ROUTE_PROTOCOL; +use shared::ntdef::{PVOID, UCHAR, ULONG, USHORT}; +const ANY_SIZE: usize = 1; +STRUCT!{struct MIB_IPADDRROW_XP { + dwAddr: DWORD, + dwIndex: IF_INDEX, + dwMask: DWORD, + dwBCastAddr: DWORD, + dwReasmSize: DWORD, + unused1: USHORT, + wType: USHORT, +}} +pub type PMIB_IPADDRROW_XP = *mut MIB_IPADDRROW_XP; +STRUCT!{struct MIB_IPADDRROW_W2K { + dwAddr: DWORD, + dwIndex: DWORD, + dwMask: DWORD, + dwBCastAddr: DWORD, + dwReasmSize: DWORD, + unused1: USHORT, + unused2: USHORT, +}} +pub type PMIB_IPADDRROW_W2K = *mut MIB_IPADDRROW_W2K; +pub type MIB_IPADDRROW = MIB_IPADDRROW_XP; +pub type PMIB_IPADDRROW = *mut MIB_IPADDRROW; +STRUCT!{struct MIB_IPADDRTABLE { + dwNumEntries: DWORD, + table: [MIB_IPADDRROW; ANY_SIZE], +}} +pub type PMIB_IPADDRTABLE = *mut MIB_IPADDRTABLE; +// FIXME: SIZEOF_IPADDRTABLE(x) +STRUCT!{struct MIB_IPFORWARDNUMBER { + dwValue: DWORD, +}} +pub type PMIB_IPFORWARDNUMBER = *mut MIB_IPFORWARDNUMBER; +pub type MIB_IPFORWARD_PROTO = NL_ROUTE_PROTOCOL; +ENUM!{enum MIB_IPFORWARD_TYPE { + MIB_IPROUTE_TYPE_OTHER = 1, + MIB_IPROUTE_TYPE_INVALID = 2, + MIB_IPROUTE_TYPE_DIRECT = 3, + MIB_IPROUTE_TYPE_INDIRECT = 4, +}} +STRUCT!{struct MIB_IPFORWARDROW { + dwForwardDest: DWORD, + dwForwardMask: DWORD, + dwForwardPolicy: DWORD, + dwForwardNextHop: DWORD, + dwForwardIfIndex: IF_INDEX, + ForwardType: MIB_IPFORWARD_TYPE, + ForwardProto: MIB_IPFORWARD_PROTO, + dwForwardAge: DWORD, + dwForwardNextHopAS: DWORD, + dwForwardMetric1: DWORD, + dwForwardMetric2: DWORD, + dwForwardMetric3: DWORD, + dwForwardMetric4: DWORD, + dwForwardMetric5: DWORD, +}} +pub type PMIB_IPFORWARDROW = *mut MIB_IPFORWARDROW; +STRUCT!{struct MIB_IPFORWARDTABLE { + dwNumEntries: DWORD, + table: [MIB_IPFORWARDROW; ANY_SIZE], +}} +pub type PMIB_IPFORWARDTABLE = *mut MIB_IPFORWARDTABLE; +// FIXME: SIZEOF_IPFORWARDTABLE(x) +ENUM!{enum MIB_IPNET_TYPE { + MIB_IPNET_TYPE_OTHER = 1, + MIB_IPNET_TYPE_INVALID = 2, + MIB_IPNET_TYPE_DYNAMIC = 3, + MIB_IPNET_TYPE_STATIC = 4, +}} +STRUCT!{struct MIB_IPNETROW_LH { + dwIndex: IF_INDEX, + dwPhysAddrLen: DWORD, + bPhysAddr: [UCHAR; MAXLEN_PHYSADDR], + dwAddr: DWORD, + Type: MIB_IPNET_TYPE, +}} +pub type PMIB_IPNETROW_LH = *mut MIB_IPNETROW_LH; +STRUCT!{struct MIB_IPNETROW_W2K { + dwIndex: IF_INDEX, + dwPhysAddrLen: DWORD, + bPhysAddr: [UCHAR; MAXLEN_PHYSADDR], + dwAddr: DWORD, + dwType: DWORD, +}} +pub type PMIB_IPNETROW_W2K = *mut MIB_IPNETROW_W2K; +pub type MIB_IPNETROW = MIB_IPNETROW_LH; +pub type PMIB_IPNETROW = *mut MIB_IPNETROW; +STRUCT!{struct MIB_IPNETTABLE { + dwNumEntries: DWORD, + table: [MIB_IPNETROW; ANY_SIZE], +}} +pub type PMIB_IPNETTABLE = *mut MIB_IPNETTABLE; +// FIXME: SIZEOF_IPNETTABLE(x) +ENUM!{enum MIB_IPSTATS_FORWARDING { + MIB_IP_FORWARDING = 1, + MIB_IP_NOT_FORWARDING = 2, +}} +pub type PMIB_IPSTATS_FORWARDING = *mut MIB_IPSTATS_FORWARDING; +STRUCT!{struct MIB_IPSTATS_LH { + Forwarding: MIB_IPSTATS_FORWARDING, + dwDefaultTTL: DWORD, + dwInReceives: DWORD, + dwInHdrErrors: DWORD, + dwInAddrErrors: DWORD, + dwForwDatagrams: DWORD, + dwInUnknownProtos: DWORD, + dwInDiscards: DWORD, + 
dwInDelivers: DWORD, + dwOutRequests: DWORD, + dwRoutingDiscards: DWORD, + dwOutDiscards: DWORD, + dwOutNoRoutes: DWORD, + dwReasmTimeout: DWORD, + dwReasmReqds: DWORD, + dwReasmOks: DWORD, + dwReasmFails: DWORD, + dwFragOks: DWORD, + dwFragFails: DWORD, + dwFragCreates: DWORD, + dwNumIf: DWORD, + dwNumAddr: DWORD, + dwNumRoutes: DWORD, +}} +pub type PMIB_IPSTATS_LH = *mut MIB_IPSTATS_LH; +STRUCT!{struct MIB_IPSTATS_W2K { + dwForwarding: DWORD, + dwDefaultTTL: DWORD, + dwInReceives: DWORD, + dwInHdrErrors: DWORD, + dwInAddrErrors: DWORD, + dwForwDatagrams: DWORD, + dwInUnknownProtos: DWORD, + dwInDiscards: DWORD, + dwInDelivers: DWORD, + dwOutRequests: DWORD, + dwRoutingDiscards: DWORD, + dwOutDiscards: DWORD, + dwOutNoRoutes: DWORD, + dwReasmTimeout: DWORD, + dwReasmReqds: DWORD, + dwReasmOks: DWORD, + dwReasmFails: DWORD, + dwFragOks: DWORD, + dwFragFails: DWORD, + dwFragCreates: DWORD, + dwNumIf: DWORD, + dwNumAddr: DWORD, + dwNumRoutes: DWORD, +}} +pub type PMIB_IPSTATS_W2K = *mut MIB_IPSTATS_W2K; +pub type MIB_IPSTATS = MIB_IPSTATS_LH; +pub type PMIB_IPSTATS = *mut MIB_IPSTATS; +STRUCT!{struct MIBICMPSTATS { + dwMsgs: DWORD, + dwErrors: DWORD, + dwDestUnreachs: DWORD, + dwTimeExcds: DWORD, + dwParmProbs: DWORD, + dwSrcQuenchs: DWORD, + dwRedirects: DWORD, + dwEchos: DWORD, + dwEchoReps: DWORD, + dwTimestamps: DWORD, + dwTimestampReps: DWORD, + dwAddrMasks: DWORD, + dwAddrMaskReps: DWORD, +}} +pub type PMIBICMPSTATS = *mut MIBICMPSTATS; +STRUCT!{struct MIBICMPINFO { + icmpInStats: MIBICMPSTATS, + icmpOutStats: MIBICMPSTATS, +}} +STRUCT!{struct MIB_ICMP { + stats: MIBICMPINFO, +}} +pub type PMIB_ICMP = *mut MIB_ICMP; +STRUCT!{struct MIBICMPSTATS_EX_XPSP1 { + dwMsgs: DWORD, + dwErrors: DWORD, + rgdwTypeCount: [DWORD; 256], +}} +pub type PMIBICMPSTATS_EX_XPSP1 = *mut MIBICMPSTATS_EX_XPSP1; +pub type MIBICMPSTATS_EX = MIBICMPSTATS_EX_XPSP1; +pub type PMIBICMPSTATS_EX = *mut MIBICMPSTATS_EX_XPSP1; +STRUCT!{struct MIB_ICMP_EX_XPSP1 { + icmpInStats: MIBICMPSTATS_EX, + icmpOutStats: MIBICMPSTATS_EX, +}} +pub type PMIB_ICMP_EX_XPSP1 = *mut MIB_ICMP_EX_XPSP1; +pub type MIB_ICMP_EX = MIB_ICMP_EX_XPSP1; +pub type PMIB_ICMP_EX = *mut MIB_ICMP_EX_XPSP1; +ENUM!{enum ICMP6_TYPE { + ICMP6_DST_UNREACH = 1, + ICMP6_PACKET_TOO_BIG = 2, + ICMP6_TIME_EXCEEDED = 3, + ICMP6_PARAM_PROB = 4, + ICMP6_ECHO_REQUEST = 128, + ICMP6_ECHO_REPLY = 129, + ICMP6_MEMBERSHIP_QUERY = 130, + ICMP6_MEMBERSHIP_REPORT = 131, + ICMP6_MEMBERSHIP_REDUCTION = 132, + ND_ROUTER_SOLICIT = 133, + ND_ROUTER_ADVERT = 134, + ND_NEIGHBOR_SOLICIT = 135, + ND_NEIGHBOR_ADVERT = 136, + ND_REDIRECT = 137, + ICMP6_V2_MEMBERSHIP_REPORT = 143, +}} +pub type PICMP6_TYPE = *mut ICMP6_TYPE; +ENUM!{enum ICMP4_TYPE { + ICMP4_ECHO_REPLY = 0, + ICMP4_DST_UNREACH = 3, + ICMP4_SOURCE_QUENCH = 4, + ICMP4_REDIRECT = 5, + ICMP4_ECHO_REQUEST = 8, + ICMP4_ROUTER_ADVERT = 9, + ICMP4_ROUTER_SOLICIT = 10, + ICMP4_TIME_EXCEEDED = 11, + ICMP4_PARAM_PROB = 12, + ICMP4_TIMESTAMP_REQUEST = 13, + ICMP4_TIMESTAMP_REPLY = 14, + ICMP4_MASK_REQUEST = 17, + ICMP4_MASK_REPLY = 18, +}} +pub type PICMP4_TYPE = *mut ICMP4_TYPE; +STRUCT!{struct MIB_IPMCAST_OIF_XP { + dwOutIfIndex: DWORD, + dwNextHopAddr: DWORD, + dwReserved: DWORD, + dwReserved1: DWORD, +}} +pub type PMIB_IPMCAST_OIF_XP = *mut MIB_IPMCAST_OIF_XP; +STRUCT!{struct MIB_IPMCAST_OIF_W2K { + dwOutIfIndex: DWORD, + dwNextHopAddr: DWORD, + pvReserved: PVOID, + dwReserved: DWORD, +}} +pub type PMIB_IPMCAST_OIF_W2K = *mut MIB_IPMCAST_OIF_W2K; +pub type MIB_IPMCAST_OIF = MIB_IPMCAST_OIF_XP; +pub type PMIB_IPMCAST_OIF = *mut 
MIB_IPMCAST_OIF; +STRUCT!{struct MIB_IPMCAST_MFE { + dwGroup: DWORD, + dwSource: DWORD, + dwSrcMask: DWORD, + dwUpStrmNgbr: DWORD, + dwInIfIndex: DWORD, + dwInIfProtocol: DWORD, + dwRouteProtocol: DWORD, + dwRouteNetwork: DWORD, + dwRouteMask: DWORD, + ulUpTime: ULONG, + ulExpiryTime: ULONG, + ulTimeOut: ULONG, + ulNumOutIf: ULONG, + fFlags: DWORD, + dwReserved: DWORD, + rgmioOutInfo: [MIB_IPMCAST_OIF; ANY_SIZE], +}} +pub type PMIB_IPMCAST_MFE = *mut MIB_IPMCAST_MFE; +STRUCT!{struct MIB_MFE_TABLE { + dwNumEntries: DWORD, + table: [MIB_IPMCAST_MFE; ANY_SIZE], +}} +pub type PMIB_MFE_TABLE = *mut MIB_MFE_TABLE; +// FIXME: SIZEOF_BASIC_MIB_MFE +// FIXME: SIZEOF_MIB_MFE(x) +STRUCT!{struct MIB_IPMCAST_OIF_STATS_LH { + dwOutIfIndex: DWORD, + dwNextHopAddr: DWORD, + dwDialContext: DWORD, + ulTtlTooLow: ULONG, + ulFragNeeded: ULONG, + ulOutPackets: ULONG, + ulOutDiscards: ULONG, +}} +pub type PMIB_IPMCAST_OIF_STATS_LH = *mut MIB_IPMCAST_OIF_STATS_LH; +STRUCT!{struct MIB_IPMCAST_OIF_STATS_W2K { + dwOutIfIndex: DWORD, + dwNextHopAddr: DWORD, + pvDialContext: PVOID, + ulTtlTooLow: ULONG, + ulFragNeeded: ULONG, + ulOutPackets: ULONG, + ulOutDiscards: ULONG, +}} +pub type PMIB_IPMCAST_OIF_STATS_W2K = *mut MIB_IPMCAST_OIF_STATS_W2K; +pub type MIB_IPMCAST_OIF_STATS = MIB_IPMCAST_OIF_STATS_LH; +pub type PMIB_IPMCAST_OIF_STATS = *mut MIB_IPMCAST_OIF_STATS; +STRUCT!{struct MIB_IPMCAST_MFE_STATS { + dwGroup: DWORD, + dwSource: DWORD, + dwSrcMask: DWORD, + dwUpStrmNgbr: DWORD, + dwInIfIndex: DWORD, + dwInIfProtocol: DWORD, + dwRouteProtocol: DWORD, + dwRouteNetwork: DWORD, + dwRouteMask: DWORD, + ulUpTime: ULONG, + ulExpiryTime: ULONG, + ulNumOutIf: ULONG, + ulInPkts: ULONG, + ulInOctets: ULONG, + ulPktsDifferentIf: ULONG, + ulQueueOverflow: ULONG, + rgmiosOutStats: [MIB_IPMCAST_OIF_STATS; ANY_SIZE], +}} +pub type PMIB_IPMCAST_MFE_STATS = *mut MIB_IPMCAST_MFE_STATS; +STRUCT!{struct MIB_MFE_STATS_TABLE { + dwNumEntries: DWORD, + table: [MIB_IPMCAST_MFE_STATS; ANY_SIZE], +}} +pub type PMIB_MFE_STATS_TABLE = *mut MIB_MFE_STATS_TABLE; +// FIXME: SIZEOF_BASIC_MIB_MFE_STATS +// FIXME: SIZEOF_MIB_MFE_STATS(x) +STRUCT!{struct MIB_IPMCAST_MFE_STATS_EX_XP { + dwGroup: DWORD, + dwSource: DWORD, + dwSrcMask: DWORD, + dwUpStrmNgbr: DWORD, + dwInIfIndex: DWORD, + dwInIfProtocol: DWORD, + dwRouteProtocol: DWORD, + dwRouteNetwork: DWORD, + dwRouteMask: DWORD, + ulUpTime: ULONG, + ulExpiryTime: ULONG, + ulNumOutIf: ULONG, + ulInPkts: ULONG, + ulInOctets: ULONG, + ulPktsDifferentIf: ULONG, + ulQueueOverflow: ULONG, + ulUninitMfe: ULONG, + ulNegativeMfe: ULONG, + ulInDiscards: ULONG, + ulInHdrErrors: ULONG, + ulTotalOutPackets: ULONG, + rgmiosOutStats: [MIB_IPMCAST_OIF_STATS; ANY_SIZE], +}} +pub type PMIB_IPMCAST_MFE_STATS_EX_XP = *mut MIB_IPMCAST_MFE_STATS_EX_XP; +pub type MIB_IPMCAST_MFE_STATS_EX = MIB_IPMCAST_MFE_STATS_EX_XP; +pub type PMIB_IPMCAST_MFE_STATS_EX = *mut MIB_IPMCAST_MFE_STATS_EX; +STRUCT!{struct MIB_MFE_STATS_TABLE_EX_XP { + dwNumEntries: DWORD, + table: [PMIB_IPMCAST_MFE_STATS_EX_XP; ANY_SIZE], +}} +pub type PMIB_MFE_STATS_TABLE_EX_XP = *mut MIB_MFE_STATS_TABLE_EX_XP; +pub type MIB_MFE_STATS_TABLE_EX = MIB_MFE_STATS_TABLE_EX_XP; +pub type PMIB_MFE_STATS_TABLE_EX = *mut MIB_MFE_STATS_TABLE_EX; +// FIXME: SIZEOF_BASIC_MIB_MFE_STATS_EX +// FIXME: SIZEOF_MIB_MFE_STATS_EX(x) +STRUCT!{struct MIB_IPMCAST_GLOBAL { + dwEnable: DWORD, +}} +pub type PMIB_IPMCAST_GLOBAL = *mut MIB_IPMCAST_GLOBAL; +STRUCT!{struct MIB_IPMCAST_IF_ENTRY { + dwIfIndex: DWORD, + dwTtl: DWORD, + dwProtocol: DWORD, + dwRateLimit: DWORD, + 
ulInMcastOctets: ULONG, + ulOutMcastOctets: ULONG, +}} +pub type PMIB_IPMCAST_IF_ENTRY = *mut MIB_IPMCAST_IF_ENTRY; +STRUCT!{struct MIB_IPMCAST_IF_TABLE { + dwNumEntries: DWORD, + table: [MIB_IPMCAST_IF_ENTRY; ANY_SIZE], +}} +pub type PMIB_IPMCAST_IF_TABLE = *mut MIB_IPMCAST_IF_TABLE; +// FIXME: SIZEOF_MCAST_IF_TABLE(x) diff -Nru cargo-0.44.1/vendor/winapi/src/shared/iprtrmib.rs cargo-0.47.0/vendor/winapi/src/shared/iprtrmib.rs --- cargo-0.44.1/vendor/winapi/src/shared/iprtrmib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/iprtrmib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,125 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +// #include +// #include +// #include +// #include +// #include +// #include +use shared::ipmib::MIB_IPFORWARDROW; +use shared::minwindef::{BOOL, BYTE, DWORD}; +use shared::ntdef::{PWCHAR, ULONGLONG, WCHAR}; +pub const MAX_SCOPE_NAME_LEN: usize = 255; +pub const MAX_MIB_OFFSET: usize = 8; +const ANY_SIZE: usize = 1; +STRUCT!{struct MIB_OPAQUE_QUERY { + dwVarId: DWORD, + rgdwVarIndex: [DWORD; ANY_SIZE], +}} +pub type PMIB_OPAQUE_QUERY = *mut MIB_OPAQUE_QUERY; +ENUM!{enum TCP_TABLE_CLASS { + TCP_TABLE_BASIC_LISTENER = 0, + TCP_TABLE_BASIC_CONNECTIONS = 1, + TCP_TABLE_BASIC_ALL = 2, + TCP_TABLE_OWNER_PID_LISTENER = 3, + TCP_TABLE_OWNER_PID_CONNECTIONS = 4, + TCP_TABLE_OWNER_PID_ALL = 5, + TCP_TABLE_OWNER_MODULE_LISTENER = 6, + TCP_TABLE_OWNER_MODULE_CONNECTIONS = 7, + TCP_TABLE_OWNER_MODULE_ALL = 8, +}} +pub type PTCP_TABLE_CLASS = *mut TCP_TABLE_CLASS; +ENUM!{enum UDP_TABLE_CLASS { + UDP_TABLE_BASIC = 0, + UDP_TABLE_OWNER_PID = 1, + UDP_TABLE_OWNER_MODULE = 2, +}} +pub type PUDP_TABLE_CLASS = *mut UDP_TABLE_CLASS; +ENUM!{enum TCPIP_OWNER_MODULE_INFO_CLASS { + TCPIP_OWNER_MODULE_INFO_BASIC = 0, +}} +pub type PTCPIP_OWNER_MODULE_INFO_CLASS = *mut TCPIP_OWNER_MODULE_INFO_CLASS; +STRUCT!{struct TCPIP_OWNER_MODULE_BASIC_INFO { + pModuleName: PWCHAR, + pModulePath: PWCHAR, +}} +pub type PTCPIP_OWNER_MODULE_BASIC_INFO = *mut TCPIP_OWNER_MODULE_BASIC_INFO; +STRUCT!{struct MIB_IPMCAST_BOUNDARY { + dwIfIndex: DWORD, + dwGroupAddress: DWORD, + dwGroupMask: DWORD, + dwStatus: DWORD, +}} +pub type PMIB_IPMCAST_BOUNDARY = *mut MIB_IPMCAST_BOUNDARY; +STRUCT!{struct MIB_IPMCAST_BOUNDARY_TABLE { + dwNumEntries: DWORD, + table: [MIB_IPMCAST_BOUNDARY; ANY_SIZE], +}} +pub type PMIB_IPMCAST_BOUNDARY_TABLE = *mut MIB_IPMCAST_BOUNDARY_TABLE; +STRUCT!{struct MIB_BOUNDARYROW { + dwGroupAddress: DWORD, + dwGroupMask: DWORD, +}} +pub type PMIB_BOUNDARYROW = *mut MIB_BOUNDARYROW; +STRUCT!{struct MIB_MCAST_LIMIT_ROW { + dwTtl: DWORD, + dwRateLimit: DWORD, +}} +pub type PMIB_MCAST_LIMIT_ROW = *mut MIB_MCAST_LIMIT_ROW; +pub type SN_CHAR = WCHAR; +pub type SCOPE_NAME_BUFFER = [SN_CHAR; MAX_SCOPE_NAME_LEN + 1]; +pub type SCOPE_NAME = *mut SCOPE_NAME_BUFFER; +STRUCT!{struct MIB_IPMCAST_SCOPE { + dwGroupAddress: DWORD, + dwGroupMask: DWORD, + snNameBuffer: SCOPE_NAME_BUFFER, + dwStatus: DWORD, +}} +pub type PMIB_IPMCAST_SCOPE = *mut MIB_IPMCAST_SCOPE; +STRUCT!{struct MIB_IPDESTROW { + ForwardRow: MIB_IPFORWARDROW, + dwForwardPreference: DWORD, + dwForwardViewSet: DWORD, +}} +pub type PMIB_IPDESTROW = *mut MIB_IPDESTROW; +STRUCT!{struct MIB_IPDESTTABLE { + dwNumEntries: DWORD, + table: [MIB_IPDESTROW; ANY_SIZE], +}} +pub type PMIB_IPDESTTABLE = *mut MIB_IPDESTTABLE; +STRUCT!{struct 
MIB_BEST_IF { + dwDestAddr: DWORD, + dwIfIndex: DWORD, +}} +pub type PMIB_BEST_IF = *mut MIB_BEST_IF; +STRUCT!{struct MIB_PROXYARP { + dwAddress: DWORD, + dwMask: DWORD, + dwIfIndex: DWORD, +}} +pub type PMIB_PROXYARP = *mut MIB_PROXYARP; +STRUCT!{struct MIB_IFSTATUS { + dwIfIndex: DWORD, + dwAdminStatus: DWORD, + dwOperationalStatus: DWORD, + bMHbeatActive: BOOL, + bMHbeatAlive: BOOL, +}} +pub type PMIB_IFSTATUS = *mut MIB_IFSTATUS; +STRUCT!{struct MIB_ROUTESTATE { + bRoutesSetToStack: BOOL, +}} +pub type PMIB_ROUTESTATE = *mut MIB_ROUTESTATE; +UNION!{union MIB_OPAQUE_INFO_u { + [u64; 1], + ullAlign ullAlign_mut: ULONGLONG, + rgbyData rgbyData_mut: [BYTE; 1], +}} +STRUCT!{struct MIB_OPAQUE_INFO { + dwId: DWORD, + u: MIB_OPAQUE_INFO_u, +}} +pub type PMIB_OPAQUE_INFO = *mut MIB_OPAQUE_INFO; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/mod.rs cargo-0.47.0/vendor/winapi/src/shared/mod.rs --- cargo-0.44.1/vendor/winapi/src/shared/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -38,18 +38,25 @@ #[cfg(feature = "hidsdi")] pub mod hidsdi; #[cfg(feature = "hidusage")] pub mod hidusage; #[cfg(feature = "ifdef")] pub mod ifdef; +#[cfg(feature = "ifmib")] pub mod ifmib; #[cfg(feature = "in6addr")] pub mod in6addr; #[cfg(feature = "inaddr")] pub mod inaddr; #[cfg(feature = "intsafe")] pub mod intsafe; +#[cfg(feature = "ipifcons")] pub mod ipifcons; +#[cfg(feature = "ipmib")] pub mod ipmib; +#[cfg(feature = "iprtrmib")] pub mod iprtrmib; #[cfg(feature = "ks")] pub mod ks; #[cfg(feature = "ksmedia")] pub mod ksmedia; #[cfg(feature = "ktmtypes")] pub mod ktmtypes; #[cfg(feature = "lmcons")] pub mod lmcons; #[cfg(feature = "minwindef")] pub mod minwindef; #[cfg(feature = "mmreg")] pub mod mmreg; +#[cfg(feature = "mprapidef")] pub mod mprapidef; #[cfg(feature = "mstcpip")] pub mod mstcpip; #[cfg(feature = "mswsockdef")] pub mod mswsockdef; #[cfg(feature = "netioapi")] pub mod netioapi; +#[cfg(feature = "nldef")] pub mod nldef; +#[cfg(feature = "ntddndis")] pub mod ntddndis; #[cfg(feature = "ntddscsi")] pub mod ntddscsi; #[cfg(feature = "ntddser")] pub mod ntddser; #[cfg(feature = "ntdef")] pub mod ntdef; @@ -61,15 +68,22 @@ #[cfg(feature = "sddl")] pub mod sddl; #[cfg(feature = "sspi")] pub mod sspi; #[cfg(feature = "stralign")] pub mod stralign; +#[cfg(feature = "tcpestats")] pub mod tcpestats; +#[cfg(feature = "tcpmib")] pub mod tcpmib; #[cfg(feature = "transportsettingcommon")] pub mod transportsettingcommon; #[cfg(feature = "tvout")] pub mod tvout; +#[cfg(feature = "udpmib")] pub mod udpmib; #[cfg(feature = "usb")] pub mod usb; +#[cfg(feature = "usbioctl")] pub mod usbioctl; #[cfg(feature = "usbiodef")] pub mod usbiodef; +#[cfg(feature = "usbscan")] pub mod usbscan; #[cfg(feature = "usbspec")] pub mod usbspec; #[cfg(feature = "windef")] pub mod windef; +#[cfg(feature = "windot11")] pub mod windot11; #[cfg(feature = "windowsx")] pub mod windowsx; #[cfg(feature = "winerror")] pub mod winerror; #[cfg(feature = "winusbio")] pub mod winusbio; +#[cfg(feature = "wlantypes")] pub mod wlantypes; #[cfg(feature = "wmistr")] pub mod wmistr; #[cfg(feature = "wnnc")] pub mod wnnc; #[cfg(feature = "ws2def")] pub mod ws2def; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/mprapidef.rs cargo-0.47.0/vendor/winapi/src/shared/mprapidef.rs --- cargo-0.44.1/vendor/winapi/src/shared/mprapidef.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/mprapidef.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,14 @@ +// 
Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! MPR (Multiple Provider Router) definitions +// #include +pub const MAX_INTERFACE_NAME_LEN: usize = 256; +pub const MAX_TRANSPORT_NAME_LEN: usize = 40; +pub const MAX_MEDIA_NAME: usize = 16; +pub const MAX_PORT_NAME: usize = 16; +pub const MAX_DEVICE_NAME: usize = 128; +pub const MAX_PHONE_NUMBER_LEN: usize = 128; +pub const MAX_DEVICETYPE_NAME: usize = 16; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/netioapi.rs cargo-0.47.0/vendor/winapi/src/shared/netioapi.rs --- cargo-0.44.1/vendor/winapi/src/shared/netioapi.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/netioapi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,14 +3,553 @@ // , at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. -use shared::basetsd::SIZE_T; +use shared::basetsd::{PUINT8, SIZE_T, UINT8, ULONG64}; use shared::guiddef::GUID; -use shared::ifdef::{NET_IFINDEX, NET_LUID, PNET_IFINDEX, PNET_LUID}; -use shared::minwindef::DWORD; -use shared::ntdef::{CHAR, PSTR, PWSTR, WCHAR}; +use shared::ifdef::{ + IF_MAX_PHYS_ADDRESS_LENGTH, IF_MAX_STRING_SIZE, IF_OPER_STATUS, NET_IFINDEX, + NET_IF_ACCESS_TYPE, NET_IF_ADMIN_STATUS, NET_IF_COMPARTMENT_ID, NET_IF_COMPARTMENT_SCOPE, + NET_IF_CONNECTION_TYPE, NET_IF_DIRECTION_TYPE, NET_IF_MEDIA_CONNECT_STATE, NET_IF_NETWORK_GUID, + NET_LUID, PNET_IFINDEX, PNET_IF_COMPARTMENT_ID, PNET_IF_COMPARTMENT_SCOPE, PNET_LUID, + TUNNEL_TYPE, +}; +use shared::ipifcons::IFTYPE; +use shared::minwindef::{BYTE, DWORD, PULONG, UCHAR, ULONG, USHORT}; +use shared::nldef::{ + NL_BANDWIDTH_INFORMATION, NL_DAD_STATE, NL_INTERFACE_OFFLOAD_ROD, + NL_LINK_LOCAL_ADDRESS_BEHAVIOR, NL_NEIGHBOR_STATE, NL_PREFIX_ORIGIN, + NL_ROUTER_DISCOVERY_BEHAVIOR, NL_ROUTE_ORIGIN, NL_ROUTE_PROTOCOL, NL_SUFFIX_ORIGIN, +}; +use shared::ntddndis::{NDIS_MEDIUM, NDIS_PHYSICAL_MEDIUM}; +use shared::ntdef::{ + BOOLEAN, CHAR, HANDLE, LARGE_INTEGER, PCHAR, PCSTR, PSTR, PVOID, PWCHAR, PWSTR, WCHAR, +}; +use shared::ws2def::{ADDRESS_FAMILY, SCOPE_ID, ScopeLevelCount}; +use shared::ws2ipdef::{PSOCKADDR_IN6_PAIR, SOCKADDR_IN6, SOCKADDR_INET}; +const ANY_SIZE: usize = 1; pub type NETIO_STATUS = DWORD; pub type NETIOAPI_API = NETIO_STATUS; +ENUM!{enum MIB_NOTIFICATION_TYPE { + MibParameterNotification, + MibAddInstance, + MibDeleteInstance, + MibInitialNotification, +}} +pub type PMIB_NOTIFICATION_TYPE = *mut MIB_NOTIFICATION_TYPE; +STRUCT!{struct MIB_IF_ROW2_InterfaceAndOperStatusFlags { + bitfield: BYTE, +}} +BITFIELD!{MIB_IF_ROW2_InterfaceAndOperStatusFlags bitfield: BOOLEAN [ + HardwareInterface set_HardwareInterface[0..1], + FilterInterface set_FilterInterface[1..2], + ConnectorPresent set_ConnectorPresent[2..3], + NotAuthenticated set_NotAuthenticated[3..4], + NotMediaConnected set_NotMediaConnected[4..5], + Paused set_Paused[5..6], + LowPower set_LowPower[6..7], + EndPointInterface set_EndPointInterface[7..8], +]} +STRUCT!{struct MIB_IF_ROW2 { + InterfaceLuid: NET_LUID, + InterfaceIndex: NET_IFINDEX, + InterfaceGuid: GUID, + Alias: [WCHAR; IF_MAX_STRING_SIZE + 1], + Description: [WCHAR; IF_MAX_STRING_SIZE + 1], + PhysicalAddressLength: ULONG, + PhysicalAddress: [UCHAR; IF_MAX_PHYS_ADDRESS_LENGTH], + PermanentPhysicalAddress: [UCHAR; IF_MAX_PHYS_ADDRESS_LENGTH], + Mtu: ULONG, + Type: IFTYPE, + 
TunnelType: TUNNEL_TYPE, + MediaType: NDIS_MEDIUM, + PhysicalMediumType: NDIS_PHYSICAL_MEDIUM, + AccessType: NET_IF_ACCESS_TYPE, + DirectionType: NET_IF_DIRECTION_TYPE, + InterfaceAndOperStatusFlags: MIB_IF_ROW2_InterfaceAndOperStatusFlags, + OperStatus: IF_OPER_STATUS, + AdminStatus: NET_IF_ADMIN_STATUS, + MediaConnectState: NET_IF_MEDIA_CONNECT_STATE, + NetworkGuid: NET_IF_NETWORK_GUID, + ConnectionType: NET_IF_CONNECTION_TYPE, + TransmitLinkSpeed: ULONG64, + ReceiveLinkSpeed: ULONG64, + InOctets: ULONG64, + InUcastPkts: ULONG64, + InNUcastPkts: ULONG64, + InDiscards: ULONG64, + InErrors: ULONG64, + InUnknownProtos: ULONG64, + InUcastOctets: ULONG64, + InMulticastOctets: ULONG64, + InBroadcastOctets: ULONG64, + OutOctets: ULONG64, + OutUcastPkts: ULONG64, + OutNUcastPkts: ULONG64, + OutDiscards: ULONG64, + OutErrors: ULONG64, + OutUcastOctets: ULONG64, + OutMulticastOctets: ULONG64, + OutBroadcastOctets: ULONG64, + OutQLen: ULONG64, +}} +pub type PMIB_IF_ROW2 = *mut MIB_IF_ROW2; +STRUCT!{struct MIB_IF_TABLE2 { + NumEntries: ULONG, + Table: [MIB_IF_ROW2; ANY_SIZE], +}} +pub type PMIB_IF_TABLE2 = *mut MIB_IF_TABLE2; extern "system" { + pub fn GetIfEntry2( + Row: PMIB_IF_ROW2, + ) -> NETIOAPI_API; +} +ENUM!{enum MIB_IF_ENTRY_LEVEL { + MibIfEntryNormal = 0, + MibIfEntryNormalWithoutStatistics = 2, +}} +pub type PMIB_IF_ENTRY_LEVEL = *mut MIB_IF_ENTRY_LEVEL; +extern "system" { + pub fn GetIfEntry2Ex( + Level: MIB_IF_ENTRY_LEVEL, + Row: PMIB_IF_ROW2, + ) -> NETIOAPI_API; + pub fn GetIfTable2( + Table: *mut PMIB_IF_TABLE2, + ) -> NETIOAPI_API; +} +ENUM!{enum MIB_IF_TABLE_LEVEL { + MibIfTableNormal = 0, + MibIfTableRaw = 1, + MibIfTableNormalWithoutStatistics = 2, +}} +pub type PMIB_IF_TABLE_LEVEL = *mut MIB_IF_TABLE_LEVEL; +extern "system" { + pub fn GetIfTable2Ex( + Level: MIB_IF_TABLE_LEVEL, + Table: *mut PMIB_IF_TABLE2, + ) -> NETIOAPI_API; +} +STRUCT!{struct MIB_IPINTERFACE_ROW { + Family: ADDRESS_FAMILY, + InterfaceLuid: NET_LUID, + InterfaceIndex: NET_IFINDEX, + MaxReassemblySize: ULONG, + InterfaceIdentifier: ULONG64, + MinRouterAdvertisementInterval: ULONG, + MaxRouterAdvertisementInterval: ULONG, + AdvertisingEnabled: BOOLEAN, + ForwardingEnabled: BOOLEAN, + WeakHostSend: BOOLEAN, + WeakHostReceive: BOOLEAN, + UseAutomaticMetric: BOOLEAN, + UseNeighborUnreachabilityDetection: BOOLEAN, + ManagedAddressConfigurationSupported: BOOLEAN, + OtherStatefulConfigurationSupported: BOOLEAN, + AdvertiseDefaultRoute: BOOLEAN, + RouterDiscoveryBehavior: NL_ROUTER_DISCOVERY_BEHAVIOR, + DadTransmits: ULONG, // DupAddrDetectTransmits in RFC 2462. + BaseReachableTime: ULONG, + RetransmitTime: ULONG, + PathMtuDiscoveryTimeout: ULONG, // Path MTU discovery timeout (in ms). + LinkLocalAddressBehavior: NL_LINK_LOCAL_ADDRESS_BEHAVIOR, + LinkLocalAddressTimeout: ULONG, // In ms. + ZoneIndices: [ULONG; ScopeLevelCount as usize], // Zone part of a SCOPE_ID. 
+ SitePrefixLength: ULONG, + Metric: ULONG, + NlMtu: ULONG, + Connected: BOOLEAN, + SupportsWakeUpPatterns: BOOLEAN, + SupportsNeighborDiscovery: BOOLEAN, + SupportsRouterDiscovery: BOOLEAN, + ReachableTime: ULONG, + TransmitOffload: NL_INTERFACE_OFFLOAD_ROD, + ReceiveOffload: NL_INTERFACE_OFFLOAD_ROD, + DisableDefaultRoutes: BOOLEAN, +}} +pub type PMIB_IPINTERFACE_ROW = *mut MIB_IPINTERFACE_ROW; +STRUCT!{struct MIB_IPINTERFACE_TABLE { + NumEntries: ULONG, + Table: [MIB_IPINTERFACE_ROW; ANY_SIZE], +}} +pub type PMIB_IPINTERFACE_TABLE = *mut MIB_IPINTERFACE_TABLE; +STRUCT!{struct MIB_IFSTACK_ROW { + HigherLayerInterfaceIndex: NET_IFINDEX, + LowerLayerInterfaceIndex: NET_IFINDEX, +}} +pub type PMIB_IFSTACK_ROW = *mut MIB_IFSTACK_ROW; +STRUCT!{struct MIB_INVERTEDIFSTACK_ROW { + LowerLayerInterfaceIndex: NET_IFINDEX, + HigherLayerInterfaceIndex: NET_IFINDEX, +}} +pub type PMIB_INVERTEDIFSTACK_ROW = *mut MIB_INVERTEDIFSTACK_ROW; +STRUCT!{struct MIB_IFSTACK_TABLE { + NumEntries: ULONG, + Table: [MIB_IFSTACK_ROW; ANY_SIZE], +}} +pub type PMIB_IFSTACK_TABLE = *mut MIB_IFSTACK_TABLE; +STRUCT!{struct MIB_INVERTEDIFSTACK_TABLE { + NumEntries: ULONG, + Table: [MIB_INVERTEDIFSTACK_ROW; ANY_SIZE], +}} +pub type PMIB_INVERTEDIFSTACK_TABLE = *mut MIB_INVERTEDIFSTACK_TABLE; +FN!{stdcall PIPINTERFACE_CHANGE_CALLBACK( + CallerContext: PVOID, + Row: PMIB_IPINTERFACE_ROW, + NotificationType: MIB_NOTIFICATION_TYPE, +) -> ()} +STRUCT!{struct MIB_IP_NETWORK_CONNECTION_BANDWIDTH_ESTIMATES { + InboundBandwidthInformation: NL_BANDWIDTH_INFORMATION, + OutboundBandwidthInformation: NL_BANDWIDTH_INFORMATION, +}} +pub type PMIB_IP_NETWORK_CONNECTION_BANDWIDTH_ESTIMATES = *mut + MIB_IP_NETWORK_CONNECTION_BANDWIDTH_ESTIMATES; +extern "system" { + pub fn GetIfStackTable( + Table: *mut PMIB_IFSTACK_TABLE, + ) -> NETIOAPI_API; + pub fn GetInvertedIfStackTable( + Table: *mut PMIB_INVERTEDIFSTACK_TABLE, + ) -> NETIOAPI_API; + pub fn GetIpInterfaceEntry( + Row: PMIB_IPINTERFACE_ROW, + ) -> NETIOAPI_API; + pub fn GetIpInterfaceTable( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_IPINTERFACE_TABLE, + ) -> NETIOAPI_API; + pub fn InitializeIpInterfaceEntry( + Row: PMIB_IPINTERFACE_ROW, + ); + pub fn NotifyIpInterfaceChange( + Family: ADDRESS_FAMILY, + Callback: PIPINTERFACE_CHANGE_CALLBACK, + CallerContext: PVOID, + InitialNotification: BOOLEAN, + NotificationHandle: *mut HANDLE + ) -> NETIOAPI_API; + pub fn SetIpInterfaceEntry( + Row: PMIB_IPINTERFACE_ROW, + ) -> NETIOAPI_API; + pub fn GetIpNetworkConnectionBandwidthEstimates( + InterfaceIndex: NET_IFINDEX, + AddressFamily: ADDRESS_FAMILY, + BandwidthEstimates: PMIB_IP_NETWORK_CONNECTION_BANDWIDTH_ESTIMATES, + ) -> NETIOAPI_API; +} +STRUCT!{struct MIB_UNICASTIPADDRESS_ROW { + Address: SOCKADDR_INET, + InterfaceLuid: NET_LUID, + InterfaceIndex: NET_IFINDEX, + PrefixOrigin: NL_PREFIX_ORIGIN, + SuffixOrigin: NL_SUFFIX_ORIGIN, + ValidLifetime: ULONG, + PreferredLifetime: ULONG, + OnLinkPrefixLength: UINT8, + SkipAsSource: BOOLEAN, + DadState: NL_DAD_STATE, + ScopeId: SCOPE_ID, + CreationTimeStamp: LARGE_INTEGER, +}} +pub type PMIB_UNICASTIPADDRESS_ROW = *mut MIB_UNICASTIPADDRESS_ROW; +STRUCT!{struct MIB_UNICASTIPADDRESS_TABLE { + NumEntries: ULONG, + Table: [MIB_UNICASTIPADDRESS_ROW; ANY_SIZE], +}} +pub type PMIB_UNICASTIPADDRESS_TABLE = *mut MIB_UNICASTIPADDRESS_TABLE; +FN!{stdcall PUNICAST_IPADDRESS_CHANGE_CALLBACK( + CallerContext: PVOID, + Row: PMIB_UNICASTIPADDRESS_ROW, + NotificationType: MIB_NOTIFICATION_TYPE, +) -> ()} +extern "system" { + pub fn CreateUnicastIpAddressEntry( + 
Row: *const MIB_UNICASTIPADDRESS_ROW, + ) -> NETIOAPI_API; + pub fn DeleteUnicastIpAddressEntry( + Row: *const MIB_UNICASTIPADDRESS_ROW, + ) -> NETIOAPI_API; + pub fn GetUnicastIpAddressEntry( + Row: PMIB_UNICASTIPADDRESS_ROW + ) -> NETIOAPI_API; + pub fn GetUnicastIpAddressTable( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_UNICASTIPADDRESS_TABLE, + ) -> NETIOAPI_API; + pub fn InitializeUnicastIpAddressEntry( + Row: PMIB_UNICASTIPADDRESS_ROW, + ); + pub fn NotifyUnicastIpAddressChange( + Family: ADDRESS_FAMILY, + Callback: PUNICAST_IPADDRESS_CHANGE_CALLBACK, + CallerContext: PVOID, + InitialNotification: BOOLEAN, + NotificationHandle: *mut HANDLE, + ) -> NETIOAPI_API; +} +FN!{stdcall PSTABLE_UNICAST_IPADDRESS_TABLE_CALLBACK( + CallerContext: PVOID, + AddressTable: PMIB_UNICASTIPADDRESS_TABLE, +) -> ()} +extern "system" { + pub fn NotifyStableUnicastIpAddressTable( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_UNICASTIPADDRESS_TABLE, + CallerCallback: PSTABLE_UNICAST_IPADDRESS_TABLE_CALLBACK, + CallerContext: PVOID, + NotificationHandle: *mut HANDLE, + ) -> NETIOAPI_API; + pub fn SetUnicastIpAddressEntry( + Row: *const MIB_UNICASTIPADDRESS_ROW, + ) -> NETIOAPI_API; +} +STRUCT!{struct MIB_ANYCASTIPADDRESS_ROW { + Address: SOCKADDR_INET, + InterfaceLuid: NET_LUID, + InterfaceIndex: NET_IFINDEX, + ScopeId: SCOPE_ID, +}} +pub type PMIB_ANYCASTIPADDRESS_ROW = *mut MIB_ANYCASTIPADDRESS_ROW; +STRUCT!{struct MIB_ANYCASTIPADDRESS_TABLE { + NumEntries: ULONG, + Table: [MIB_ANYCASTIPADDRESS_ROW; ANY_SIZE], +}} +pub type PMIB_ANYCASTIPADDRESS_TABLE = *mut MIB_ANYCASTIPADDRESS_TABLE; +extern "system" { + pub fn CreateAnycastIpAddressEntry( + Row: *const MIB_ANYCASTIPADDRESS_ROW, + ) -> NETIOAPI_API; + pub fn DeleteAnycastIpAddressEntry( + Row: *const MIB_ANYCASTIPADDRESS_ROW, + ) -> NETIOAPI_API; + pub fn GetAnycastIpAddressEntry( + Row: PMIB_ANYCASTIPADDRESS_ROW, + ) -> NETIOAPI_API; + pub fn GetAnycastIpAddressTable( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_ANYCASTIPADDRESS_TABLE, + ) -> NETIOAPI_API; +} +STRUCT!{struct MIB_MULTICASTIPADDRESS_ROW { + Address: SOCKADDR_INET, + InterfaceIndex: NET_IFINDEX, + InterfaceLuid: NET_LUID, + ScopeId: SCOPE_ID, +}} +pub type PMIB_MULTICASTIPADDRESS_ROW = *mut MIB_MULTICASTIPADDRESS_ROW; +STRUCT!{struct MIB_MULTICASTIPADDRESS_TABLE { + NumEntries: ULONG, + Table: [MIB_MULTICASTIPADDRESS_ROW; ANY_SIZE], +}} +pub type PMIB_MULTICASTIPADDRESS_TABLE = *mut MIB_MULTICASTIPADDRESS_TABLE; +extern "system" { + pub fn GetMulticastIpAddressEntry( + Row: PMIB_MULTICASTIPADDRESS_ROW, + ) -> NETIOAPI_API; + pub fn GetMulticastIpAddressTable( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_MULTICASTIPADDRESS_TABLE, + ) -> NETIOAPI_API; +} +STRUCT!{struct IP_ADDRESS_PREFIX { + Prefix: SOCKADDR_INET, + PrefixLength: UINT8, +}} +pub type PIP_ADDRESS_PREFIX = *mut IP_ADDRESS_PREFIX; +STRUCT!{struct MIB_IPFORWARD_ROW2 { + InterfaceLuid: NET_LUID, + InterfaceIndex: NET_IFINDEX, + DestinationPrefix: IP_ADDRESS_PREFIX, + NextHop: SOCKADDR_INET, + SitePrefixLength: UCHAR, + ValidLifetime: ULONG, + PreferredLifetime: ULONG, + Metric: ULONG, + Protocol: NL_ROUTE_PROTOCOL, + Loopback: BOOLEAN, + AutoconfigureAddress: BOOLEAN, + Publish: BOOLEAN, + Immortal: BOOLEAN, + Age: ULONG, + Origin: NL_ROUTE_ORIGIN, +}} +pub type PMIB_IPFORWARD_ROW2 = *mut MIB_IPFORWARD_ROW2; +STRUCT!{struct MIB_IPFORWARD_TABLE2 { + NumEntries: ULONG, + Table: [MIB_IPFORWARD_ROW2; ANY_SIZE], +}} +pub type PMIB_IPFORWARD_TABLE2 = *mut MIB_IPFORWARD_TABLE2; +FN!{stdcall PIPFORWARD_CHANGE_CALLBACK( + CallerContext: 
PVOID, + Row: PMIB_IPFORWARD_ROW2, + NotificationType: MIB_NOTIFICATION_TYPE, +) -> ()} +extern "system" { + pub fn CreateIpForwardEntry2( + Row: *const MIB_IPFORWARD_ROW2, + ) -> NETIOAPI_API; + pub fn DeleteIpForwardEntry2( + Row: *const MIB_IPFORWARD_ROW2, + ) -> NETIOAPI_API; + pub fn GetBestRoute2( + InterfaceLuid: *mut NET_LUID, + InterfaceIndex: NET_IFINDEX, + SourceAddress: *const SOCKADDR_INET, + DestinationAddress: *const SOCKADDR_INET, + AddressSortOptions: ULONG, + BestRoute: PMIB_IPFORWARD_ROW2, + BestSourceAddress: *mut SOCKADDR_INET, + ) -> NETIOAPI_API; + pub fn GetIpForwardEntry2( + Row: PMIB_IPFORWARD_ROW2, + ) -> NETIOAPI_API; + pub fn GetIpForwardTable2( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_IPFORWARD_TABLE2, + ) -> NETIOAPI_API; + pub fn InitializeIpForwardEntry( + Row: PMIB_IPFORWARD_ROW2, + ); + pub fn NotifyRouteChange2( + AddressFamily: ADDRESS_FAMILY, + Callback: PIPFORWARD_CHANGE_CALLBACK, + CallerContext: PVOID, + InitialNotification: BOOLEAN, + NotificationHandle: *mut HANDLE, + ) -> NETIOAPI_API; + pub fn SetIpForwardEntry2( + Route: *const MIB_IPFORWARD_ROW2, + ) -> NETIOAPI_API; +} +UNION!{union MIB_IPPATH_ROW_u { + [u32; 1], + LastReachable LastReachable_mut: ULONG, // Milliseconds. + LastUnreachable LastUnreachable_mut: ULONG, // Milliseconds. +}} +STRUCT!{struct MIB_IPPATH_ROW { + Source: SOCKADDR_INET, + Destination: SOCKADDR_INET, + InterfaceLuid: NET_LUID, + InterfaceIndex: NET_IFINDEX, + CurrentNextHop: SOCKADDR_INET, + PathMtu: ULONG, + RttMean: ULONG, + RttDeviation: ULONG, + u: MIB_IPPATH_ROW_u, + IsReachable: BOOLEAN, + LinkTransmitSpeed: ULONG64, + LinkReceiveSpeed: ULONG64, +}} +pub type PMIB_IPPATH_ROW = *mut MIB_IPPATH_ROW; +STRUCT!{struct MIB_IPPATH_TABLE { + NumEntries: ULONG, + Table: [MIB_IPPATH_ROW; ANY_SIZE], +}} +pub type PMIB_IPPATH_TABLE = *mut MIB_IPPATH_TABLE; +extern "system" { + pub fn FlushIpPathTable( + Family: ADDRESS_FAMILY, + ) -> NETIOAPI_API; + pub fn GetIpPathEntry( + Row: PMIB_IPPATH_ROW, + ) -> NETIOAPI_API; + pub fn GetIpPathTable( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_IPPATH_TABLE, + ) -> NETIOAPI_API; +} +STRUCT!{struct MIB_IPNET_ROW2_s { + Flags: UCHAR, +}} +BITFIELD!{MIB_IPNET_ROW2_s Flags: UCHAR [ + IsRouter set_IsRouter[0..1], + IsUnreachable set_IsUnreachable[1..2], + Reserved set_Reserved[2..8], +]} +UNION!{union MIB_IPNET_ROW2_ReachabilityTime { + [u32; 1], + LastReachable LastReachable_mut: ULONG, + LastUnreachable LastUnreachable_mut: ULONG, +}} +STRUCT!{struct MIB_IPNET_ROW2 { + Address: SOCKADDR_INET, + InterfaceIndex: NET_IFINDEX, + InterfaceLuid: NET_LUID, + PhysicalAddress: [UCHAR; IF_MAX_PHYS_ADDRESS_LENGTH], + PhysicalAddressLength: ULONG, + State: NL_NEIGHBOR_STATE, + s: MIB_IPNET_ROW2_s, + ReachabilityTime: MIB_IPNET_ROW2_ReachabilityTime, +}} +pub type PMIB_IPNET_ROW2 = *mut MIB_IPNET_ROW2; +STRUCT!{struct MIB_IPNET_TABLE2 { + NumEntries: ULONG, + Table: [MIB_IPNET_ROW2; ANY_SIZE], +}} +pub type PMIB_IPNET_TABLE2 = *mut MIB_IPNET_TABLE2; +extern "system" { + pub fn CreateIpNetEntry2( + Row: *const MIB_IPNET_ROW2, + ) -> NETIOAPI_API; + pub fn DeleteIpNetEntry2( + Row: *const MIB_IPNET_ROW2, + ) -> NETIOAPI_API; + pub fn FlushIpNetTable2( + Family: ADDRESS_FAMILY, + InterfaceIndex: NET_IFINDEX, + ) -> NETIOAPI_API; + pub fn GetIpNetEntry2( + Row: PMIB_IPNET_ROW2, + ) -> NETIOAPI_API; + pub fn GetIpNetTable2( + Family: ADDRESS_FAMILY, + Table: *mut PMIB_IPNET_TABLE2, + ) -> NETIOAPI_API; + pub fn ResolveIpNetEntry2( + Row: PMIB_IPNET_ROW2, + SourceAddress: *const SOCKADDR_INET, + ) -> 
NETIOAPI_API; + pub fn SetIpNetEntry2( + Row: PMIB_IPNET_ROW2, + ) -> NETIOAPI_API; +} +pub const MIB_INVALID_TEREDO_PORT_NUMBER: USHORT = 0; +FN!{stdcall PTEREDO_PORT_CHANGE_CALLBACK( + CallerContext: PVOID, + Port: USHORT, + NotificationType: MIB_NOTIFICATION_TYPE, +) -> ()} +extern "system" { + pub fn NotifyTeredoPortChange( + Callback: PTEREDO_PORT_CHANGE_CALLBACK, + CallerContext: PVOID, + InitialNotification: BOOLEAN, + NotificationHandle: *mut HANDLE, + ) -> NETIOAPI_API; + pub fn GetTeredoPort( + Port: *mut USHORT, + ) -> NETIOAPI_API; + pub fn CancelMibChangeNotify2( + NotificationHandle: HANDLE, + ) -> NETIOAPI_API; + pub fn FreeMibTable( + Memory: PVOID, + ); + pub fn CreateSortedAddressPairs( + SourceAddressList: *const SOCKADDR_IN6, + SourceAddressCount: ULONG, + DestinationAddressList: *const SOCKADDR_IN6, + DestinationAddressCount: ULONG, + AddressSortOptions: ULONG, + SortedAddressPairList: *mut PSOCKADDR_IN6_PAIR, + SortedAddressPairCount: *mut ULONG, + ) -> NETIOAPI_API; + pub fn ConvertCompartmentGuidToId( + CompartmentGuid: *const GUID, + CompartmentId: PNET_IF_COMPARTMENT_ID, + ) -> NETIOAPI_API; + pub fn ConvertCompartmentIdToGuid( + CompartmentId: NET_IF_COMPARTMENT_ID, + CompartmentGuid: *mut GUID, + ) -> NETIOAPI_API; pub fn ConvertInterfaceNameToLuidA( InterfaceName: *const CHAR, InterfaceLuid: *mut NET_LUID, @@ -54,4 +593,110 @@ InterfaceGuid: *const GUID, InterfaceLuid: PNET_LUID, ) -> NETIOAPI_API; + pub fn if_nametoindex( + InterfaceName: PCSTR, + ) -> NET_IFINDEX; + pub fn if_indextoname( + InterfaceIndex: NET_IFINDEX, + InterfaceName: PCHAR, + ) -> PCHAR; + pub fn GetCurrentThreadCompartmentId() -> NET_IF_COMPARTMENT_ID; + pub fn SetCurrentThreadCompartmentId( + CompartmentId: NET_IF_COMPARTMENT_ID + ) -> NETIOAPI_API; + pub fn GetCurrentThreadCompartmentScope( + CompartmentScope: PNET_IF_COMPARTMENT_SCOPE, + CompartmentId: PNET_IF_COMPARTMENT_ID, + ); + pub fn SetCurrentThreadCompartmentScope( + CompartmentScope: NET_IF_COMPARTMENT_SCOPE, + ) -> NETIOAPI_API; + pub fn GetJobCompartmentId( + JobHandle: HANDLE, + ) -> NET_IF_COMPARTMENT_ID; + pub fn SetJobCompartmentId( + JobHandle: HANDLE, + CompartmentId: NET_IF_COMPARTMENT_ID, + ) -> NETIOAPI_API; + pub fn GetSessionCompartmentId( + SessionId: ULONG, + ) -> NET_IF_COMPARTMENT_ID; + pub fn SetSessionCompartmentId( + SessionId: ULONG, + CompartmentId: NET_IF_COMPARTMENT_ID, + ) -> NETIOAPI_API; + pub fn GetDefaultCompartmentId() -> NET_IF_COMPARTMENT_ID; + pub fn GetNetworkInformation( + NetworkGuid: *const NET_IF_NETWORK_GUID, + CompartmentId: PNET_IF_COMPARTMENT_ID, + SiteId: PULONG, + NetworkName: PWCHAR, + Length: ULONG, + ) -> NETIOAPI_API; + pub fn SetNetworkInformation( + NetworkGuid: *const NET_IF_NETWORK_GUID, + CompartmentId: NET_IF_COMPARTMENT_ID, + NetworkName: *const WCHAR, + ) -> NETIOAPI_API; + pub fn ConvertLengthToIpv4Mask( + MaskLength: ULONG, + Mask: PULONG, + ) -> NETIOAPI_API; + pub fn ConvertIpv4MaskToLength( + Mask: ULONG, + MaskLength: PUINT8, + ) -> NETIOAPI_API; +} +pub const DNS_SETTINGS_VERSION1: ULONG = 0x0001; +pub const DNS_INTERFACE_SETTINGS_VERSION1: ULONG = 0x0001; +pub const DNS_SETTING_IPV6: ULONG64 = 0x0001; +pub const DNS_SETTING_NAMESERVER: ULONG64 = 0x0002; +pub const DNS_SETTING_SEARCHLIST: ULONG64 = 0x0004; +pub const DNS_SETTING_REGISTRATION_ENABLED: ULONG64 = 0x0008; +pub const DNS_SETTING_REGISTER_ADAPTER_NAME: ULONG64 = 0x0010; +pub const DNS_SETTING_DOMAIN: ULONG64 = 0x0020; +pub const DNS_SETTING_HOSTNAME: ULONG64 = 0x0040; +pub const DNS_SETTINGS_ENABLE_LLMNR: 
ULONG64 = 0x0080; +pub const DNS_SETTINGS_QUERY_ADAPTER_NAME: ULONG64 = 0x0100; +pub const DNS_SETTING_PROFILE_NAMESERVER: ULONG64 = 0x0200; +STRUCT!{struct DNS_SETTINGS { + Version: ULONG, + Flags: ULONG64, + Hostname: PWSTR, + Domain: PWSTR, + SearchList: PWSTR, +}} +STRUCT!{struct DNS_INTERFACE_SETTINGS { + Version: ULONG, + Flags: ULONG64, + Domain: PWSTR, + NameServer: PWSTR, + SearchList: PWSTR, + RegistrationEnabled: ULONG, + RegisterAdapterName: ULONG, + EnableLLMNR: ULONG, + QueryAdapterName: ULONG, + ProfileNameServer: PWSTR, +}} +extern "system" { + pub fn GetDnsSettings( + Settings: *mut DNS_SETTINGS, + ) -> NETIOAPI_API; + pub fn FreeDnsSettings( + Settings: *mut DNS_SETTINGS, + ); + pub fn SetDnsSettings( + Settings: *const DNS_SETTINGS, + ) -> NETIOAPI_API; + pub fn GetInterfaceDnsSettings( + Interface: GUID, + Settings: *mut DNS_INTERFACE_SETTINGS, + ) -> NETIOAPI_API; + pub fn FreeInterfaceDnsSettings( + Settings: *mut DNS_INTERFACE_SETTINGS, + ); + pub fn SetInterfaceDnsSettings( + Interface: GUID, + Settings: *const DNS_INTERFACE_SETTINGS, + ) -> NETIOAPI_API; } diff -Nru cargo-0.44.1/vendor/winapi/src/shared/nldef.rs cargo-0.47.0/vendor/winapi/src/shared/nldef.rs --- cargo-0.44.1/vendor/winapi/src/shared/nldef.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/nldef.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,204 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::basetsd::ULONG64; +use shared::minwindef::ULONG; +use shared::ntdef::BOOLEAN; +ENUM!{enum NL_PREFIX_ORIGIN { + IpPrefixOriginOther = 0, + IpPrefixOriginManual, + IpPrefixOriginWellKnown, + IpPrefixOriginDhcp, + IpPrefixOriginRouterAdvertisement, + IpPrefixOriginUnchanged = 1 << 4, +}} +pub const NlpoOther: NL_PREFIX_ORIGIN = IpPrefixOriginOther; +pub const NlpoManual: NL_PREFIX_ORIGIN = IpPrefixOriginManual; +pub const NlpoWellKnown: NL_PREFIX_ORIGIN = IpPrefixOriginWellKnown; +pub const NlpoDhcp: NL_PREFIX_ORIGIN = IpPrefixOriginDhcp; +pub const NlpoRouterAdvertisement: NL_PREFIX_ORIGIN = IpPrefixOriginRouterAdvertisement; +ENUM!{enum NL_SUFFIX_ORIGIN { + NlsoOther = 0, + NlsoManual, + NlsoWellKnown, + NlsoDhcp, + NlsoLinkLayerAddress, + NlsoRandom, + IpSuffixOriginOther = 0, + IpSuffixOriginManual, + IpSuffixOriginWellKnown, + IpSuffixOriginDhcp, + IpSuffixOriginLinkLayerAddress, + IpSuffixOriginRandom, + IpSuffixOriginUnchanged = 1 << 4, +}} +ENUM!{enum NL_DAD_STATE { + NldsInvalid, + NldsTentative, + NldsDuplicate, + NldsDeprecated, + NldsPreferred, + IpDadStateInvalid = 0, + IpDadStateTentative, + IpDadStateDuplicate, + IpDadStateDeprecated, + IpDadStatePreferred, +}} +pub const NL_MAX_METRIC_COMPONENT: ULONG = (1u32 << 31) - 1; +ENUM!{enum NL_ROUTE_PROTOCOL { + RouteProtocolOther = 1, + RouteProtocolLocal = 2, + RouteProtocolNetMgmt = 3, + RouteProtocolIcmp = 4, + RouteProtocolEgp = 5, + RouteProtocolGgp = 6, + RouteProtocolHello = 7, + RouteProtocolRip = 8, + RouteProtocolIsIs = 9, + RouteProtocolEsIs = 10, + RouteProtocolCisco = 11, + RouteProtocolBbn = 12, + RouteProtocolOspf = 13, + RouteProtocolBgp = 14, + RouteProtocolIdpr = 15, + RouteProtocolEigrp = 16, + RouteProtocolDvmrp = 17, + RouteProtocolRpl = 18, + RouteProtocolDhcp = 19, + MIB_IPPROTO_OTHER = 1, + PROTO_IP_OTHER = 1, + MIB_IPPROTO_LOCAL = 2, + PROTO_IP_LOCAL = 2, + MIB_IPPROTO_NETMGMT = 3, + PROTO_IP_NETMGMT = 3, + 
MIB_IPPROTO_ICMP = 4, + PROTO_IP_ICMP = 4, + MIB_IPPROTO_EGP = 5, + PROTO_IP_EGP = 5, + MIB_IPPROTO_GGP = 6, + PROTO_IP_GGP = 6, + MIB_IPPROTO_HELLO = 7, + PROTO_IP_HELLO = 7, + MIB_IPPROTO_RIP = 8, + PROTO_IP_RIP = 8, + MIB_IPPROTO_IS_IS = 9, + PROTO_IP_IS_IS = 9, + MIB_IPPROTO_ES_IS = 10, + PROTO_IP_ES_IS = 10, + MIB_IPPROTO_CISCO = 11, + PROTO_IP_CISCO = 11, + MIB_IPPROTO_BBN = 12, + PROTO_IP_BBN = 12, + MIB_IPPROTO_OSPF = 13, + PROTO_IP_OSPF = 13, + MIB_IPPROTO_BGP = 14, + PROTO_IP_BGP = 14, + MIB_IPPROTO_IDPR = 15, + PROTO_IP_IDPR = 15, + MIB_IPPROTO_EIGRP = 16, + PROTO_IP_EIGRP = 16, + MIB_IPPROTO_DVMRP = 17, + PROTO_IP_DVMRP = 17, + MIB_IPPROTO_RPL = 18, + PROTO_IP_RPL = 18, + MIB_IPPROTO_DHCP = 19, + PROTO_IP_DHCP = 19, + MIB_IPPROTO_NT_AUTOSTATIC = 10002, + PROTO_IP_NT_AUTOSTATIC = 10002, + MIB_IPPROTO_NT_STATIC = 10006, + PROTO_IP_NT_STATIC = 10006, + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007, + PROTO_IP_NT_STATIC_NON_DOD = 10007, +}} +pub type PNL_ROUTE_PROTOCOL = *mut NL_ROUTE_PROTOCOL; +ENUM!{enum NL_ADDRESS_TYPE { + NlatUnspecified = 0, + NlatUnicast = 1, + NlatAnycast = 2, + NlatMulticast = 3, + NlatBroadcast = 4, + NlatInvalid = 5, +}} +pub type PNL_ADDRESS_TYPE = *mut NL_ADDRESS_TYPE; +ENUM!{enum NL_ROUTE_ORIGIN { + NlroManual = 0, + NlroWellKnown = 1, + NlroDHCP = 2, + NlroRouterAdvertisement = 3, + Nlro6to4 = 4, +}} +pub type PNL_ROUTE_ORIGIN = *mut NL_ROUTE_ORIGIN; +ENUM!{enum NL_NEIGHBOR_STATE { + NlnsUnreachable = 0, + NlnsIncomplete = 1, + NlnsProbe = 2, + NlnsDelay = 3, + NlnsStale = 4, + NlnsReachable = 5, + NlnsPermanent = 6, + NlnsMaximum = 7, +}} +pub type PNL_NEIGHBOR_STATE = *mut NL_NEIGHBOR_STATE; +ENUM!{enum NL_LINK_LOCAL_ADDRESS_BEHAVIOR { + LinkLocalAlwaysOff = 0, + LinkLocalDelayed = 1, + LinkLocalAlwaysOn = 2, + LinkLocalUnchanged = -1i32 as u32, +}} +STRUCT!{struct NL_INTERFACE_OFFLOAD_ROD { + bitfield: BOOLEAN, +}} +BITFIELD!{NL_INTERFACE_OFFLOAD_ROD bitfield: BOOLEAN [ + NlChecksumSupported set_NlChecksumSupported[0..1], + NlOptionsSupported set_NlOptionsSupported[1..2], + TlDatagramChecksumSupported set_TlDatagramChecksumSupported[2..3], + TlStreamChecksumSupported set_TlStreamChecksumSupported[3..4], + TlStreamOptionsSupported set_TlStreamOptionsSupported[4..5], + FastPathCompatible set_FastPathCompatible[5..6], + TlLargeSendOffloadSupported set_TlLargeSendOffloadSupported[6..7], + TlGiantSendOffloadSupported set_TlGiantSendOffloadSupported[7..8], +]} +pub type PNL_INTERFACE_OFFLOAD_ROD = *mut NL_INTERFACE_OFFLOAD_ROD; +ENUM!{enum NL_ROUTER_DISCOVERY_BEHAVIOR { + RouterDiscoveryDisabled = 0, + RouterDiscoveryEnabled = 1, + RouterDiscoveryDhcp = 2, + RouterDiscoveryUnchanged = -1i32 as u32, +}} +ENUM!{enum NL_BANDWIDTH_FLAG { + NlbwDisabled = 0, + NlbwEnabled = 1, + NlbwUnchanged = -1i32 as u32, +}} +pub type PNL_BANDWIDTH_FLAG = *mut NL_BANDWIDTH_FLAG; +STRUCT!{struct NL_PATH_BANDWIDTH_ROD { + Bandwidth: ULONG64, + Instability: ULONG64, + BandwidthPeaked: BOOLEAN, +}} +pub type PNL_PATH_BANDWIDTH_ROD = *mut NL_PATH_BANDWIDTH_ROD; +ENUM!{enum NL_NETWORK_CATEGORY { + NetworkCategoryPublic = 0, + NetworkCategoryPrivate = 1, + NetworkCategoryDomainAuthenticated = 2, + NetworkCategoryUnchanged = -1i32 as u32, + NetworkCategoryUnknown = -1i32 as u32, +}} +pub type PNL_NETWORK_CATEGORY = *mut NL_NETWORK_CATEGORY; +ENUM!{enum NL_INTERFACE_NETWORK_CATEGORY_STATE { + NlincCategoryUnknown = 0, + NlincPublic = 1, + NlincPrivate = 2, + NlincDomainAuthenticated = 3, + NlincCategoryStateMax = 4, +}} +pub type PNL_INTERFACE_NETWORK_CATEGORY_STATE = *mut 
NL_INTERFACE_NETWORK_CATEGORY_STATE; +pub const NET_IF_CURRENT_SESSION: ULONG = -1i32 as u32; +STRUCT!{struct NL_BANDWIDTH_INFORMATION { + Bandwidth: ULONG64, + Instability: ULONG64, + BandwidthPeaked: BOOLEAN, +}} +pub type PNL_BANDWIDTH_INFORMATION = *mut NL_BANDWIDTH_INFORMATION; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ntddndis.rs cargo-0.47.0/vendor/winapi/src/shared/ntddndis.rs --- cargo-0.44.1/vendor/winapi/src/shared/ntddndis.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ntddndis.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,75 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use ctypes::c_int; +use shared::ifdef::IF_MAX_STRING_SIZE; +use shared::minwindef::{UCHAR, USHORT}; +//290 +STRUCT!{struct NDIS_OBJECT_HEADER { + Type: UCHAR, + Revision: UCHAR, + Size: USHORT, +}} +pub type PNDIS_OBJECT_HEADER = *mut NDIS_OBJECT_HEADER; +//2274 +ENUM!{enum NDIS_MEDIUM { + NdisMedium802_3 = 0, + NdisMedium802_5 = 1, + NdisMediumFddi = 2, + NdisMediumWan = 3, + NdisMediumLocalTalk = 4, + NdisMediumDix = 5, + NdisMediumArcnetRaw = 6, + NdisMediumArcnet878_2 = 7, + NdisMediumAtm = 8, + NdisMediumWirelessWan = 9, + NdisMediumIrda = 10, + NdisMediumBpc = 11, + NdisMediumCoWan = 12, + NdisMedium1394 = 13, + NdisMediumInfiniBand = 14, + NdisMediumTunnel = 15, + NdisMediumNative802_11 = 16, + NdisMediumLoopback = 17, + NdisMediumWiMAX = 18, + NdisMediumIP = 19, + NdisMediumMax = 20, +}} +pub type PNDIS_MEDIUM = *mut NDIS_MEDIUM; +ENUM!{enum NDIS_PHYSICAL_MEDIUM { + NdisPhysicalMediumUnspecified = 0, + NdisPhysicalMediumWirelessLan = 1, + NdisPhysicalMediumCableModem = 2, + NdisPhysicalMediumPhoneLine = 3, + NdisPhysicalMediumPowerLine = 4, + NdisPhysicalMediumDSL = 5, + NdisPhysicalMediumFibreChannel = 6, + NdisPhysicalMedium1394 = 7, + NdisPhysicalMediumWirelessWan = 8, + NdisPhysicalMediumNative802_11 = 9, + NdisPhysicalMediumBluetooth = 10, + NdisPhysicalMediumInfiniband = 11, + NdisPhysicalMediumWiMax = 12, + NdisPhysicalMediumUWB = 13, + NdisPhysicalMedium802_3 = 14, + NdisPhysicalMedium802_5 = 15, + NdisPhysicalMediumIrda = 16, + NdisPhysicalMediumWiredWAN = 17, + NdisPhysicalMediumWiredCoWan = 18, + NdisPhysicalMediumOther = 19, + NdisPhysicalMediumMax = 20, +}} +pub type PNDIS_PHYSICAL_MEDIUM = *mut NDIS_PHYSICAL_MEDIUM; +//2691 +pub type NDIS_STATUS = c_int; +pub type PNDIS_STATUS = *mut c_int; +//2736 +pub const NDIS_PACKET_TYPE_DIRECTED: u32 = 0x00000001; +pub const NDIS_PACKET_TYPE_MULTICAST: u32 = 0x00000002; +pub const NDIS_PACKET_TYPE_ALL_MULTICAST: u32 = 0x00000004; +pub const NDIS_PACKET_TYPE_BROADCAST: u32 = 0x00000008; +pub const NDIS_PACKET_TYPE_PROMISCUOUS: u32 = 0x00000020; +//2835 +pub const NDIS_IF_MAX_STRING_SIZE: usize = IF_MAX_STRING_SIZE; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ntddscsi.rs cargo-0.47.0/vendor/winapi/src/shared/ntddscsi.rs --- cargo-0.44.1/vendor/winapi/src/shared/ntddscsi.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ntddscsi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -59,9 +59,9 @@ CTL_CODE!(IOCTL_SCSI_BASE, 0x0413, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS); pub const IOCTL_MPIO_PASS_THROUGH_PATH_DIRECT_EX: ULONG = CTL_CODE!(IOCTL_SCSI_BASE, 0x0414, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS); -pub const IOCTL_SCSI_MINIPORT_NVCACHE: ULONG = ((FILE_DEVICE_SCSI << 16) + 
0x0600); -pub const IOCTL_SCSI_MINIPORT_HYBRID: ULONG = ((FILE_DEVICE_SCSI << 16) + 0x0620); -pub const IOCTL_SCSI_MINIPORT_FIRMWARE: ULONG = ((FILE_DEVICE_SCSI << 16) + 0x0780); +pub const IOCTL_SCSI_MINIPORT_NVCACHE: ULONG = (FILE_DEVICE_SCSI << 16) + 0x0600; +pub const IOCTL_SCSI_MINIPORT_HYBRID: ULONG = (FILE_DEVICE_SCSI << 16) + 0x0620; +pub const IOCTL_SCSI_MINIPORT_FIRMWARE: ULONG = (FILE_DEVICE_SCSI << 16) + 0x0780; STRUCT!{struct SCSI_PASS_THROUGH { Length: USHORT, ScsiStatus: UCHAR, diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ntdef.rs cargo-0.47.0/vendor/winapi/src/shared/ntdef.rs --- cargo-0.44.1/vendor/winapi/src/shared/ntdef.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ntdef.rs 2020-10-01 21:38:28.000000000 +0000 @@ -198,22 +198,32 @@ UNION!{union LARGE_INTEGER { [i64; 1], s s_mut: LARGE_INTEGER_s, + u u_mut: LARGE_INTEGER_u, QuadPart QuadPart_mut: LONGLONG, }} STRUCT!{struct LARGE_INTEGER_s { LowPart: ULONG, HighPart: LONG, }} +STRUCT!{struct LARGE_INTEGER_u { + LowPart: ULONG, + HighPart: LONG, +}} pub type PLARGE_INTEGER = *mut LARGE_INTEGER; UNION!{union ULARGE_INTEGER { [u64; 1], s s_mut: ULARGE_INTEGER_s, + u u_mut: ULARGE_INTEGER_u, QuadPart QuadPart_mut: ULONGLONG, }} STRUCT!{struct ULARGE_INTEGER_s { LowPart: ULONG, HighPart: ULONG, }} +STRUCT!{struct ULARGE_INTEGER_u { + LowPart: ULONG, + HighPart: ULONG, +}} pub type PULARGE_INTEGER = *mut ULARGE_INTEGER; pub type RTL_REFERENCE_COUNT = LONG_PTR; pub type PRTL_REFERENCE_COUNT = *mut RTL_REFERENCE_COUNT; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/tcpestats.rs cargo-0.47.0/vendor/winapi/src/shared/tcpestats.rs --- cargo-0.44.1/vendor/winapi/src/shared/tcpestats.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/tcpestats.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,211 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms.
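The TCP_ESTATS_* definitions added below follow the RFC 4898 split used by iphlpapi: *_ROD_v0 blocks hold read-only dynamic statistics, *_ROS_v0 read-only static ones, and *_RW_v0 is the small read-write block a caller toggles to start collection. A minimal sketch of enabling the data block for one connection, assuming the SDK-documented prototype of iphlpapi's SetPerTcpConnectionEStats (declared by hand here, since it is not part of this hunk) and the corresponding winapi feature gates:

    use winapi::shared::ntdef::{UCHAR, ULONG};
    use winapi::shared::tcpestats::{TcpConnectionEstatsData, TCP_ESTATS_DATA_RW_v0, TCP_ESTATS_TYPE};
    use winapi::shared::tcpmib::MIB_TCPROW;

    // Prototype copied from the SDK documentation; not re-exported in this hunk.
    #[link(name = "iphlpapi")]
    extern "system" {
        fn SetPerTcpConnectionEStats(
            Row: *mut MIB_TCPROW,
            EstatsType: TCP_ESTATS_TYPE,
            Rw: *mut UCHAR,
            RwVersion: ULONG,
            RwSize: ULONG,
            Offset: ULONG,
        ) -> ULONG;
    }

    // Switch on TcpConnectionEstatsData collection for an existing connection row
    // (a MIB_TCPROW obtained from GetTcpTable); returns the Win32 error code.
    unsafe fn enable_data_estats(row: &mut MIB_TCPROW) -> ULONG {
        let mut rw = TCP_ESTATS_DATA_RW_v0 { EnableCollection: 1 };
        SetPerTcpConnectionEStats(
            row,
            TcpConnectionEstatsData,
            &mut rw as *mut _ as *mut UCHAR,
            0, // RwVersion: the _v0 layouts below
            std::mem::size_of::<TCP_ESTATS_DATA_RW_v0>() as ULONG,
            0, // byte offset into the RW block
        )
    }

Reading the counters back would go through the documented GetPerTcpConnectionEStats with the matching *_ROD_v0 buffer.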
+// #include +use shared::basetsd::{SIZE_T, ULONG64}; +use shared::ntdef::{BOOLEAN, UCHAR, ULONG}; +ENUM!{enum TCP_ESTATS_TYPE { + TcpConnectionEstatsSynOpts = 0, + TcpConnectionEstatsData = 1, + TcpConnectionEstatsSndCong = 2, + TcpConnectionEstatsPath = 3, + TcpConnectionEstatsSendBuff = 4, + TcpConnectionEstatsRec = 5, + TcpConnectionEstatsObsRec = 6, + TcpConnectionEstatsBandwidth = 7, + TcpConnectionEstatsFineRtt = 8, + TcpConnectionEstatsMaximum = 9, +}} +pub type PTCP_ESTATS_TYPE = *mut TCP_ESTATS_TYPE; +ENUM!{enum TCP_BOOLEAN_OPTIONAL { + TcpBoolOptDisabled = 0, + TcpBoolOptEnabled = 1, + TcpBoolOptUnchanged = -1i32 as u32, +}} +pub type PTCP_BOOLEAN_OPTIONAL = *mut TCP_BOOLEAN_OPTIONAL; +STRUCT!{struct TCP_ESTATS_SYN_OPTS_ROS_v0 { + ActiveOpen: BOOLEAN, + MssRcvd: ULONG, + MssSent: ULONG, +}} +pub type PTCP_ESTATS_SYN_OPTS_ROS_v0 = *mut TCP_ESTATS_SYN_OPTS_ROS_v0; +ENUM!{enum TCP_SOFT_ERROR { + TcpErrorNone = 0, + TcpErrorBelowDataWindow = 1, + TcpErrorAboveDataWindow = 2, + TcpErrorBelowAckWindow = 3, + TcpErrorAboveAckWindow = 4, + TcpErrorBelowTsWindow = 5, + TcpErrorAboveTsWindow = 6, + TcpErrorDataChecksumError = 7, + TcpErrorDataLengthError = 8, + TcpErrorMaxSoftError = 9, +}} +pub type PTCP_SOFT_ERROR = *mut TCP_SOFT_ERROR; +STRUCT!{struct TCP_ESTATS_DATA_ROD_v0 { + DataBytesOut: ULONG64, + DataSegsOut: ULONG64, + DataBytesIn: ULONG64, + DataSegsIn: ULONG64, + SegsOut: ULONG64, + SegsIn: ULONG64, + SoftErrors: ULONG, + SoftErrorReason: ULONG, + SndUna: ULONG, + SndNxt: ULONG, + SndMax: ULONG, + ThruBytesAcked: ULONG64, + RcvNxt: ULONG, + ThruBytesReceived: ULONG64, +}} +pub type PTCP_ESTATS_DATA_ROD_v0 = *mut TCP_ESTATS_DATA_ROD_v0; +STRUCT!{struct TCP_ESTATS_DATA_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_DATA_RW_v0 = *mut TCP_ESTATS_DATA_RW_v0; +STRUCT!{struct TCP_ESTATS_SND_CONG_ROD_v0 { + SndLimTransRwin: ULONG, + SndLimTimeRwin: ULONG, + SndLimBytesRwin: SIZE_T, + SndLimTransCwnd: ULONG, + SndLimTimeCwnd: ULONG, + SndLimBytesCwnd: SIZE_T, + SndLimTransSnd: ULONG, + SndLimTimeSnd: ULONG, + SndLimBytesSnd: SIZE_T, + SlowStart: ULONG, + CongAvoid: ULONG, + OtherReductions: ULONG, + CurCwnd: ULONG, + MaxSsCwnd: ULONG, + MaxCaCwnd: ULONG, + CurSsthresh: ULONG, + MaxSsthresh: ULONG, + MinSsthresh: ULONG, +}} +pub type PTCP_ESTATS_SND_CONG_ROD_v0 = *mut TCP_ESTATS_SND_CONG_ROD_v0; +STRUCT!{struct TCP_ESTATS_SND_CONG_ROS_v0 { + LimCwnd: ULONG, +}} +pub type PTCP_ESTATS_SND_CONG_ROS_v0 = *mut TCP_ESTATS_SND_CONG_ROS_v0; +STRUCT!{struct TCP_ESTATS_SND_CONG_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_SND_CONG_RW_v0 = *mut TCP_ESTATS_SND_CONG_RW_v0; +STRUCT!{struct TCP_ESTATS_PATH_ROD_v0 { + FastRetran: ULONG, + Timeouts: ULONG, + SubsequentTimeouts: ULONG, + CurTimeoutCount: ULONG, + AbruptTimeouts: ULONG, + PktsRetrans: ULONG, + BytesRetrans: ULONG, + DupAcksIn: ULONG, + SacksRcvd: ULONG, + SackBlocksRcvd: ULONG, + CongSignals: ULONG, + PreCongSumCwnd: ULONG, + PreCongSumRtt: ULONG, + PostCongSumRtt: ULONG, + PostCongCountRtt: ULONG, + EcnSignals: ULONG, + EceRcvd: ULONG, + SendStall: ULONG, + QuenchRcvd: ULONG, + RetranThresh: ULONG, + SndDupAckEpisodes: ULONG, + SumBytesReordered: ULONG, + NonRecovDa: ULONG, + NonRecovDaEpisodes: ULONG, + AckAfterFr: ULONG, + DsackDups: ULONG, + SampleRtt: ULONG, + SmoothedRtt: ULONG, + RttVar: ULONG, + MaxRtt: ULONG, + MinRtt: ULONG, + SumRtt: ULONG, + CountRtt: ULONG, + CurRto: ULONG, + MaxRto: ULONG, + MinRto: ULONG, + CurMss: ULONG, + MaxMss: ULONG, + MinMss: ULONG, + SpuriousRtoDetections: ULONG, +}} +pub 
type PTCP_ESTATS_PATH_ROD_v0 = *mut TCP_ESTATS_PATH_ROD_v0; +STRUCT!{struct TCP_ESTATS_PATH_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_PATH_RW_v0 = *mut TCP_ESTATS_PATH_RW_v0; +STRUCT!{struct TCP_ESTATS_SEND_BUFF_ROD_v0 { + CurRetxQueue: SIZE_T, + MaxRetxQueue: SIZE_T, + CurAppWQueue: SIZE_T, + MaxAppWQueue: SIZE_T, +}} +pub type PTCP_ESTATS_SEND_BUFF_ROD_v0 = *mut TCP_ESTATS_SEND_BUFF_ROD_v0; +STRUCT!{struct TCP_ESTATS_SEND_BUFF_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_SEND_BUFF_RW_v0 = *mut TCP_ESTATS_SEND_BUFF_RW_v0; +STRUCT!{struct TCP_ESTATS_REC_ROD_v0 { + CurRwinSent: ULONG, + MaxRwinSent: ULONG, + MinRwinSent: ULONG, + LimRwin: ULONG, + DupAckEpisodes: ULONG, + DupAcksOut: ULONG, + CeRcvd: ULONG, + EcnSent: ULONG, + EcnNoncesRcvd: ULONG, + CurReasmQueue: ULONG, + MaxReasmQueue: ULONG, + CurAppRQueue: SIZE_T, + MaxAppRQueue: SIZE_T, + WinScaleSent: UCHAR, +}} +pub type PTCP_ESTATS_REC_ROD_v0 = *mut TCP_ESTATS_REC_ROD_v0; +STRUCT!{struct TCP_ESTATS_REC_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_REC_RW_v0 = *mut TCP_ESTATS_REC_RW_v0; +STRUCT!{struct TCP_ESTATS_OBS_REC_ROD_v0 { + CurRwinRcvd: ULONG, + MaxRwinRcvd: ULONG, + MinRwinRcvd: ULONG, + WinScaleRcvd: UCHAR, +}} +pub type PTCP_ESTATS_OBS_REC_ROD_v0 = *mut TCP_ESTATS_OBS_REC_ROD_v0; +STRUCT!{struct TCP_ESTATS_OBS_REC_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_OBS_REC_RW_v0 = *mut TCP_ESTATS_OBS_REC_RW_v0; +STRUCT!{struct TCP_ESTATS_BANDWIDTH_RW_v0 { + EnableCollectionOutbound: TCP_BOOLEAN_OPTIONAL, + EnableCollectionInbound: TCP_BOOLEAN_OPTIONAL, +}} +pub type PTCP_ESTATS_BANDWIDTH_RW_v0 = *mut TCP_ESTATS_BANDWIDTH_RW_v0; +STRUCT!{struct TCP_ESTATS_BANDWIDTH_ROD_v0 { + OutboundBandwidth: ULONG64, + InboundBandwidth: ULONG64, + OutboundInstability: ULONG64, + InboundInstability: ULONG64, + OutboundBandwidthPeaked: BOOLEAN, + InboundBandwidthPeaked: BOOLEAN, +}} +pub type PTCP_ESTATS_BANDWIDTH_ROD_v0 = *mut TCP_ESTATS_BANDWIDTH_ROD_v0; +STRUCT!{struct TCP_ESTATS_FINE_RTT_RW_v0 { + EnableCollection: BOOLEAN, +}} +pub type PTCP_ESTATS_FINE_RTT_RW_v0 = *mut TCP_ESTATS_FINE_RTT_RW_v0; +STRUCT!{struct TCP_ESTATS_FINE_RTT_ROD_v0 { + RttVar: ULONG, + MaxRtt: ULONG, + MinRtt: ULONG, + SumRtt: ULONG, +}} +pub type PTCP_ESTATS_FINE_RTT_ROD_v0 = *mut TCP_ESTATS_FINE_RTT_ROD_v0; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/tcpmib.rs cargo-0.47.0/vendor/winapi/src/shared/tcpmib.rs --- cargo-0.44.1/vendor/winapi/src/shared/tcpmib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/tcpmib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,244 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. 
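The MIB tables declared below use the classic Win32 ANY_SIZE idiom: each *TABLE struct is declared with a one-element array, but the iphlpapi query functions return dwNumEntries rows packed after the header, so callers size the buffer in bytes and read past the declared bound. A hedged sketch for the IPv4 table, declaring GetTcpTable by hand from its documented prototype rather than assuming a particular winapi binding:

    use winapi::shared::minwindef::{BOOL, ULONG};
    use winapi::shared::tcpmib::{MIB_TCPROW, MIB_TCPTABLE};

    // Prototype copied from the SDK documentation for iphlpapi's GetTcpTable.
    #[link(name = "iphlpapi")]
    extern "system" {
        fn GetTcpTable(TcpTable: *mut MIB_TCPTABLE, SizePointer: *mut ULONG, Order: BOOL) -> ULONG;
    }

    const NO_ERROR: ULONG = 0;

    unsafe fn tcp_rows() -> Vec<MIB_TCPROW> {
        // First call with an empty buffer only reports the required byte count.
        let mut size: ULONG = 0;
        GetTcpTable(std::ptr::null_mut(), &mut size, 1);
        // DWORD-aligned scratch buffer of at least that many bytes.
        let mut buf = vec![0u32; (size as usize + 3) / 4];
        let table = buf.as_mut_ptr() as *mut MIB_TCPTABLE;
        if GetTcpTable(table, &mut size, 1) != NO_ERROR {
            return Vec::new();
        }
        // `table` is declared as [MIB_TCPROW; ANY_SIZE] with ANY_SIZE == 1, but
        // dwNumEntries rows actually follow the header.
        let n = (*table).dwNumEntries as usize;
        std::slice::from_raw_parts((*table).table.as_ptr(), n).to_vec()
    }

The same two-call pattern applies to the owner-PID, owner-module and IPv6 variants retrieved through GetExtendedTcpTable.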
+// #include +use shared::basetsd::DWORD64; +use shared::in6addr::IN6_ADDR; +use shared::minwindef::DWORD; +use shared::ntdef::{LARGE_INTEGER, UCHAR, ULONGLONG}; +const ANY_SIZE: usize = 1; +pub const TCPIP_OWNING_MODULE_SIZE: usize = 16; +ENUM!{enum MIB_TCP_STATE { + MIB_TCP_STATE_CLOSED = 1, + MIB_TCP_STATE_LISTEN = 2, + MIB_TCP_STATE_SYN_SENT = 3, + MIB_TCP_STATE_SYN_RCVD = 4, + MIB_TCP_STATE_ESTAB = 5, + MIB_TCP_STATE_FIN_WAIT1 = 6, + MIB_TCP_STATE_FIN_WAIT2 = 7, + MIB_TCP_STATE_CLOSE_WAIT = 8, + MIB_TCP_STATE_CLOSING = 9, + MIB_TCP_STATE_LAST_ACK = 10, + MIB_TCP_STATE_TIME_WAIT = 11, + MIB_TCP_STATE_DELETE_TCB = 12, + MIB_TCP_STATE_RESERVED = 100, +}} +ENUM!{enum TCP_CONNECTION_OFFLOAD_STATE { + TcpConnectionOffloadStateInHost = 0, + TcpConnectionOffloadStateOffloading = 1, + TcpConnectionOffloadStateOffloaded = 2, + TcpConnectionOffloadStateUploading = 3, + TcpConnectionOffloadStateMax = 4, +}} +pub type PTCP_CONNECTION_OFFLOAD_STATE = *mut TCP_CONNECTION_OFFLOAD_STATE; +STRUCT!{struct MIB_TCPROW_LH { + State: MIB_TCP_STATE, + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwRemoteAddr: DWORD, + dwRemotePort: DWORD, +}} +pub type PMIB_TCPROW_LH = *mut MIB_TCPROW_LH; +STRUCT!{struct MIB_TCPROW_W2K { + dwState: DWORD, + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwRemoteAddr: DWORD, + dwRemotePort: DWORD, +}} +pub type PMIB_TCPROW_W2K = *mut MIB_TCPROW_W2K; +pub type MIB_TCPROW = MIB_TCPROW_LH; +pub type PMIB_TCPROW = *mut MIB_TCPROW; +STRUCT!{struct MIB_TCPTABLE { + dwNumEntries: DWORD, + table: [MIB_TCPROW; ANY_SIZE], +}} +pub type PMIB_TCPTABLE = *mut MIB_TCPTABLE; +// FIXME: SIZEOF_TCPTABLE(x) +STRUCT!{struct MIB_TCPROW2 { + dwState: DWORD, + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwRemoteAddr: DWORD, + dwRemotePort: DWORD, + dwOwningPid: DWORD, + dwOffloadState: TCP_CONNECTION_OFFLOAD_STATE, +}} +pub type PMIB_TCPROW2 = *mut MIB_TCPROW2; +STRUCT!{struct MIB_TCPTABLE2 { + dwNumEntries: DWORD, + table: [MIB_TCPROW2; ANY_SIZE], +}} +pub type PMIB_TCPTABLE2 = *mut MIB_TCPTABLE2; +// FIXME: SIZEOF_TCPTABLE2(x) +STRUCT!{struct MIB_TCPROW_OWNER_PID { + dwState: DWORD, + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwRemoteAddr: DWORD, + dwRemotePort: DWORD, + dwOwningPid: DWORD, +}} +pub type PMIB_TCPROW_OWNER_PID = *mut MIB_TCPROW_OWNER_PID; +STRUCT!{struct MIB_TCPTABLE_OWNER_PID { + dwNumEntries: DWORD, + table: [MIB_TCPROW_OWNER_PID; ANY_SIZE], +}} +pub type PMIB_TCPTABLE_OWNER_PID = *mut MIB_TCPTABLE_OWNER_PID; +// FIXME: SIZEOF_TCPTABLE_OWNER_PID(x) +STRUCT!{struct MIB_TCPROW_OWNER_MODULE { + dwState: DWORD, + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwRemoteAddr: DWORD, + dwRemotePort: DWORD, + dwOwningPid: DWORD, + liCreateTimestamp: LARGE_INTEGER, + OwningModuleInfo: [ULONGLONG; TCPIP_OWNING_MODULE_SIZE], +}} +pub type PMIB_TCPROW_OWNER_MODULE = *mut MIB_TCPROW_OWNER_MODULE; +STRUCT!{struct MIB_TCPTABLE_OWNER_MODULE { + dwNumEntries: DWORD, + table: [MIB_TCPROW_OWNER_MODULE; ANY_SIZE], +}} +pub type PMIB_TCPTABLE_OWNER_MODULE = *mut MIB_TCPTABLE_OWNER_MODULE; +// FIXME: SIZEOF_TCPTABLE_OWNER_MODULE(x) +STRUCT!{struct MIB_TCP6ROW { + State: MIB_TCP_STATE, + LocalAddr: IN6_ADDR, + dwLocalScopeId: DWORD, + dwLocalPort: DWORD, + RemoteAddr: IN6_ADDR, + dwRemoteScopeId: DWORD, + dwRemotePort: DWORD, +}} +pub type PMIB_TCP6ROW = *mut MIB_TCP6ROW; +STRUCT!{struct MIB_TCP6TABLE { + dwNumEntries: DWORD, + table: [MIB_TCP6ROW; ANY_SIZE], +}} +pub type PMIB_TCP6TABLE = *mut MIB_TCP6TABLE; +// FIXME: SIZEOF_TCP6TABLE(x) +STRUCT!{struct MIB_TCP6ROW2 { + LocalAddr: IN6_ADDR, + 
dwLocalScopeId: DWORD, + dwLocalPort: DWORD, + RemoteAddr: IN6_ADDR, + dwRemoteScopeId: DWORD, + dwRemotePort: DWORD, + State: MIB_TCP_STATE, + dwOwningPid: DWORD, + dwOffloadState: TCP_CONNECTION_OFFLOAD_STATE, +}} +pub type PMIB_TCP6ROW2 = *mut MIB_TCP6ROW2; +STRUCT!{struct MIB_TCP6TABLE2 { + dwNumEntries: DWORD, + table: [MIB_TCP6ROW2; ANY_SIZE], +}} +pub type PMIB_TCP6TABLE2 = *mut MIB_TCP6TABLE2; +// FIXME: SIZEOF_TCP6TABLE2(x) +STRUCT!{struct MIB_TCP6ROW_OWNER_PID { + ucLocalAddr: [UCHAR; 16], + dwLocalScopeId: DWORD, + dwLocalPort: DWORD, + ucRemoteAddr: [UCHAR; 16], + dwRemoteScopeId: DWORD, + dwRemotePort: DWORD, + dwState: DWORD, + dwOwningPid: DWORD, +}} +pub type PMIB_TCP6ROW_OWNER_PID = *mut MIB_TCP6ROW_OWNER_PID; +STRUCT!{struct MIB_TCP6TABLE_OWNER_PID { + dwNumEntries: DWORD, + table: [MIB_TCP6ROW_OWNER_PID; ANY_SIZE], +}} +pub type PMIB_TCP6TABLE_OWNER_PID = *mut MIB_TCP6TABLE_OWNER_PID; +// FIXME: SIZEOF_TCP6TABLE_OWNER_PID(x) +STRUCT!{struct MIB_TCP6ROW_OWNER_MODULE { + ucLocalAddr: [UCHAR; 16], + dwLocalScopeId: DWORD, + dwLocalPort: DWORD, + ucRemoteAddr: [UCHAR; 16], + dwRemoteScopeId: DWORD, + dwRemotePort: DWORD, + dwState: DWORD, + dwOwningPid: DWORD, + liCreateTimestamp: LARGE_INTEGER, + OwningModuleInfo: [ULONGLONG; TCPIP_OWNING_MODULE_SIZE], +}} +pub type PMIB_TCP6ROW_OWNER_MODULE = *mut MIB_TCP6ROW_OWNER_MODULE; +STRUCT!{struct MIB_TCP6TABLE_OWNER_MODULE { + dwNumEntries: DWORD, + table: [MIB_TCP6ROW_OWNER_MODULE; ANY_SIZE], +}} +pub type PMIB_TCP6TABLE_OWNER_MODULE = *mut MIB_TCP6TABLE_OWNER_MODULE; +// FIXME: SIZEOF_TCP6TABLE_OWNER_MODULE(x) +ENUM!{enum TCP_RTO_ALGORITHM { + TcpRtoAlgorithmOther = 1, + TcpRtoAlgorithmConstant = 2, + TcpRtoAlgorithmRsre = 3, + TcpRtoAlgorithmVanj = 4, + MIB_TCP_RTO_OTHER = 1, + MIB_TCP_RTO_CONSTANT = 2, + MIB_TCP_RTO_RSRE = 3, + MIB_TCP_RTO_VANJ = 4, +}} +pub type PTCP_RTO_ALGORITHM = *mut TCP_RTO_ALGORITHM; +STRUCT!{struct MIB_TCPSTATS_LH { + RtoAlgorithm: TCP_RTO_ALGORITHM, + dwRtoMin: DWORD, + dwRtoMax: DWORD, + dwMaxConn: DWORD, + dwActiveOpens: DWORD, + dwPassiveOpens: DWORD, + dwAttemptFails: DWORD, + dwEstabResets: DWORD, + dwCurrEstab: DWORD, + dwInSegs: DWORD, + dwOutSegs: DWORD, + dwRetransSegs: DWORD, + dwInErrs: DWORD, + dwOutRsts: DWORD, + dwNumConns: DWORD, +}} +pub type PMIB_TCPSTATS_LH = *mut MIB_TCPSTATS_LH; +STRUCT!{struct MIB_TCPSTATS_W2K { + dwRtoAlgorithm: DWORD, + dwRtoMin: DWORD, + dwRtoMax: DWORD, + dwMaxConn: DWORD, + dwActiveOpens: DWORD, + dwPassiveOpens: DWORD, + dwAttemptFails: DWORD, + dwEstabResets: DWORD, + dwCurrEstab: DWORD, + dwInSegs: DWORD, + dwOutSegs: DWORD, + dwRetransSegs: DWORD, + dwInErrs: DWORD, + dwOutRsts: DWORD, + dwNumConns: DWORD, +}} +pub type PMIB_TCPSTATS_W2K = *mut MIB_TCPSTATS_W2K; +pub type MIB_TCPSTATS = MIB_TCPSTATS_LH; +pub type PMIB_TCPSTATS = *mut MIB_TCPSTATS; +STRUCT!{struct MIB_TCPSTATS2 { + RtoAlgorithm: TCP_RTO_ALGORITHM, + dwRtoMin: DWORD, + dwRtoMax: DWORD, + dwMaxConn: DWORD, + dwActiveOpens: DWORD, + dwPassiveOpens: DWORD, + dwAttemptFails: DWORD, + dwEstabResets: DWORD, + dwCurrEstab: DWORD, + dw64InSegs: DWORD64, + dw64OutSegs: DWORD64, + dwRetransSegs: DWORD, + dwInErrs: DWORD, + dwOutRsts: DWORD, + dwNumConns: DWORD, +}} +pub type PMIB_TCPSTATS2 = *mut MIB_TCPSTATS2; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/udpmib.rs cargo-0.47.0/vendor/winapi/src/shared/udpmib.rs --- cargo-0.44.1/vendor/winapi/src/shared/udpmib.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/udpmib.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 
+1,130 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Contains the public definitions and structures for the UDP-specific parts of MIB-II +// #include +use shared::basetsd::DWORD64; +use shared::in6addr::IN6_ADDR; +use shared::minwindef::DWORD; +use shared::ntdef::{INT, LARGE_INTEGER, UCHAR, ULONGLONG}; +const ANY_SIZE: usize = 1; +pub const TCPIP_OWNING_MODULE_SIZE: usize = 16; +STRUCT!{struct MIB_UDPROW { + dwLocalAddr: DWORD, + dwLocalPort: DWORD, +}} +pub type PMIB_UDPROW = *mut MIB_UDPROW; +STRUCT!{struct MIB_UDPTABLE { + dwNumEntries: DWORD, + table: [MIB_UDPROW; ANY_SIZE], +}} +pub type PMIB_UDPTABLE = *mut MIB_UDPTABLE; +// FIXME: SIZEOF_UDPTABLE(x) +STRUCT!{struct MIB_UDPROW_OWNER_PID { + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwOwningPid: DWORD, +}} +pub type PMIB_UDPROW_OWNER_PID = *mut MIB_UDPROW_OWNER_PID; +STRUCT!{struct MIB_UDPTABLE_OWNER_PID { + dwNumEntries: DWORD, + table: [MIB_UDPROW_OWNER_PID; ANY_SIZE], +}} +pub type PMIB_UDPTABLE_OWNER_PID = *mut MIB_UDPTABLE_OWNER_PID; +// FIXME: SIZEOF_UDPTABLE_OWNER_PID(x) +STRUCT!{struct MIB_UDPROW_OWNER_MODULE_u_s { + bitfield: INT, +}} +BITFIELD!{MIB_UDPROW_OWNER_MODULE_u_s bitfield: INT [ + SpecificPortBind set_SpecificPortBind[0..1], +]} +UNION!{union MIB_UDPROW_OWNER_MODULE_u { + [i32; 1], + s s_mut: MIB_UDPROW_OWNER_MODULE_u_s, + dwFlags dwFlags_mut: INT, +}} +STRUCT!{struct MIB_UDPROW_OWNER_MODULE { + dwLocalAddr: DWORD, + dwLocalPort: DWORD, + dwOwningPid: DWORD, + liCreateTimestamp: LARGE_INTEGER, + u: MIB_UDPROW_OWNER_MODULE_u, + OwningModuleInfo: [ULONGLONG; TCPIP_OWNING_MODULE_SIZE], +}} +pub type PMIB_UDPROW_OWNER_MODULE = *mut MIB_UDPROW_OWNER_MODULE; +STRUCT!{struct MIB_UDPTABLE_OWNER_MODULE { + dwNumEntries: DWORD, + table: [MIB_UDPROW_OWNER_MODULE; ANY_SIZE], +}} +pub type PMIB_UDPTABLE_OWNER_MODULE = *mut MIB_UDPTABLE_OWNER_MODULE; +// FIXME: SIZEOF_UDPTABLE_OWNER_MODULE(x) +STRUCT!{struct MIB_UDP6ROW { + dwLocalAddr: IN6_ADDR, + dwLocalScopeId: DWORD, + dwLocalPort: DWORD, +}} +pub type PMIB_UDP6ROW = *mut MIB_UDP6ROW; +STRUCT!{struct MIB_UDP6TABLE { + dwNumEntries: DWORD, + table: [MIB_UDP6ROW; ANY_SIZE], +}} +pub type PMIB_UDP6TABLE = *mut MIB_UDP6TABLE; +// FIXME: SIZEOF_UDP6TABLE(x) +STRUCT!{struct MIB_UDP6ROW_OWNER_PID { + ucLocalAddr: [UCHAR; 16], + dwLocalScopeId: DWORD, + dwLocalPort: DWORD, + dwOwningPid: DWORD, +}} +pub type PMIB_UDP6ROW_OWNER_PID = *mut MIB_UDP6ROW_OWNER_PID; +STRUCT!{struct MIB_UDP6TABLE_OWNER_PID { + dwNumEntries: DWORD, + table: [MIB_UDP6ROW_OWNER_PID; ANY_SIZE], +}} +pub type PMIB_UDP6TABLE_OWNER_PID = *mut MIB_UDP6TABLE_OWNER_PID; +// FIXME: SIZEOF_UDP6TABLE_OWNER_PID(x) +STRUCT!{struct MIB_UDP6ROW_OWNER_MODULE_u_s { + bitfield: INT, +}} +BITFIELD!{MIB_UDP6ROW_OWNER_MODULE_u_s bitfield: INT [ + SpecificPortBind set_SpecificPortBind[0..1], +]} +UNION!{union MIB_UDP6ROW_OWNER_MODULE_u { + [i32; 1], + s s_mut: MIB_UDP6ROW_OWNER_MODULE_u_s, + dwFlags dwFlags_mut: INT, +}} +STRUCT!{struct MIB_UDP6ROW_OWNER_MODULE { + ucLocalAddr: [UCHAR; 16], + dwLocalScopeId: DWORD, + dwLocalPort: DWORD, + dwOwningPid: DWORD, + liCreateTimestamp: LARGE_INTEGER, + u: MIB_UDP6ROW_OWNER_MODULE_u, + OwningModuleInfo: [ULONGLONG; TCPIP_OWNING_MODULE_SIZE], +}} +pub type PMIB_UDP6ROW_OWNER_MODULE = *mut MIB_UDP6ROW_OWNER_MODULE; +STRUCT!{struct MIB_UDP6TABLE_OWNER_MODULE { + dwNumEntries: DWORD, + table: [MIB_UDP6ROW_OWNER_MODULE; ANY_SIZE], 
+}} +pub type PMIB_UDP6TABLE_OWNER_MODULE = *mut MIB_UDP6TABLE_OWNER_MODULE; +// FIXME: SIZEOF_UDP6TABLE_OWNER_MODULE(x) +STRUCT!{struct MIB_UDPSTATS { + dwInDatagrams: DWORD, + dwNoPorts: DWORD, + dwInErrors: DWORD, + dwOutDatagrams: DWORD, + dwNumAddrs: DWORD, +}} +pub type PMIB_UDPSTATS = *mut MIB_UDPSTATS; +STRUCT!{struct MIB_UDPSTATS2 { + dw64InDatagrams: DWORD64, + dwNoPorts: DWORD, + dwInErrors: DWORD, + dw64OutDatagrams: DWORD64, + dwNumAddrs: DWORD, +}} +pub type PMIB_UDPSTATS2 = *mut MIB_UDPSTATS2; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/usbioctl.rs cargo-0.47.0/vendor/winapi/src/shared/usbioctl.rs --- cargo-0.44.1/vendor/winapi/src/shared/usbioctl.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/usbioctl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,705 @@ +use shared::basetsd::ULONG64; +use shared::guiddef::GUID; +use shared::minwindef::{DWORD, UCHAR, ULONG, USHORT}; +use shared::ntdef::{BOOLEAN, HANDLE, LARGE_INTEGER, LONG, NTSTATUS, PWCHAR, WCHAR}; +use shared::usb::USBD_STATUS; +use shared::usbiodef::{ + FILE_DEVICE_USB, HCD_DIAGNOSTIC_MODE_OFF, HCD_DIAGNOSTIC_MODE_ON, HCD_DISABLE_PORT, + HCD_ENABLE_PORT, HCD_GET_DRIVERKEY_NAME, HCD_GET_ROOT_HUB_NAME, HCD_GET_STATS_1, + HCD_GET_STATS_2, USB_CYCLE_PORT, USB_DIAG_IGNORE_HUBS_OFF, USB_DIAG_IGNORE_HUBS_ON, + USB_ENABLE_PORT, USB_FAIL_GET_STATUS, USB_GET_BUSGUID_INFO, USB_GET_BUS_INFO, + USB_GET_CONTROLLER_NAME, USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, + USB_GET_DEVICE_CHARACTERISTICS, USB_GET_DEVICE_HANDLE, USB_GET_DEVICE_HANDLE_EX, + USB_GET_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC, USB_GET_HUB_CAPABILITIES, + USB_GET_HUB_CAPABILITIES_EX, USB_GET_HUB_CONFIG_INFO, USB_GET_HUB_COUNT, + USB_GET_HUB_INFORMATION_EX, USB_GET_HUB_NAME, USB_GET_NODE_CONNECTION_ATTRIBUTES, + USB_GET_NODE_CONNECTION_DRIVERKEY_NAME, USB_GET_NODE_CONNECTION_INFORMATION, + USB_GET_NODE_CONNECTION_INFORMATION_EX, USB_GET_NODE_CONNECTION_INFORMATION_EX_V2, + USB_GET_NODE_CONNECTION_NAME, USB_GET_NODE_INFORMATION, USB_GET_PARENT_HUB_INFO, + USB_GET_PORT_CONNECTOR_PROPERTIES, USB_GET_PORT_STATUS, USB_GET_ROOTHUB_PDO, + USB_GET_TOPOLOGY_ADDRESS, USB_GET_TRANSPORT_CHARACTERISTICS, USB_GET_TT_DEVICE_HANDLE, + USB_HUB_CYCLE_PORT, USB_IDLE_NOTIFICATION, USB_IDLE_NOTIFICATION_EX, + USB_NOTIFY_ON_TRANSPORT_CHARACTERISTICS_CHANGE, USB_RECORD_FAILURE, + USB_REGISTER_COMPOSITE_DEVICE, USB_REGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE, + USB_REQUEST_REMOTE_WAKE_NOTIFICATION, USB_REQ_GLOBAL_RESUME, USB_REQ_GLOBAL_SUSPEND, + USB_RESET_HUB, USB_RESET_PORT, USB_START_TRACKING_FOR_TIME_SYNC, + USB_STOP_TRACKING_FOR_TIME_SYNC, USB_SUBMIT_URB, USB_UNREGISTER_COMPOSITE_DEVICE, + USB_UNREGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE, +}; +use shared::usbspec::{ + USB_30_HUB_DESCRIPTOR, USB_CONFIGURATION_DESCRIPTOR, USB_DEVICE_DESCRIPTOR, USB_DEVICE_SPEED, + USB_ENDPOINT_DESCRIPTOR, USB_HUB_DESCRIPTOR, +}; +use um::winioctl::{FILE_ANY_ACCESS, FILE_DEVICE_USBEX, METHOD_BUFFERED, METHOD_NEITHER}; +pub const IOCTL_INTERNAL_USB_SUBMIT_URB: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_SUBMIT_URB, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_RESET_PORT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_RESET_PORT, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_ROOTHUB_PDO: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_ROOTHUB_PDO, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const USBD_PORT_ENABLED: DWORD = 0x00000001; +pub const USBD_PORT_CONNECTED: DWORD = 0x00000002; +pub const IOCTL_INTERNAL_USB_GET_PORT_STATUS: DWORD + = 
CTL_CODE!(FILE_DEVICE_USB, USB_GET_PORT_STATUS, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_ENABLE_PORT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_ENABLE_PORT, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_HUB_COUNT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_HUB_COUNT, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_CYCLE_PORT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_CYCLE_PORT, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_HUB_NAME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_HUB_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_BUS_INFO: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_BUS_INFO, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_CONTROLLER_NAME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_CONTROLLER_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_BUSGUID_INFO: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_BUSGUID_INFO, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_PARENT_HUB_INFO: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_PARENT_HUB_INFO, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_SUBMIT_IDLE_NOTIFICATION: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_IDLE_NOTIFICATION, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_DEVICE_HANDLE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_DEVICE_HANDLE, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_NOTIFY_IDLE_READY: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_IDLE_NOTIFICATION_EX, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_REQ_GLOBAL_SUSPEND: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_REQ_GLOBAL_SUSPEND, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_REQ_GLOBAL_RESUME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_REQ_GLOBAL_RESUME, METHOD_NEITHER, FILE_ANY_ACCESS); +STRUCT!{struct USB_START_FAILDATA { + LengthInBytes: ULONG, + NtStatus: NTSTATUS, + UsbdStatus: USBD_STATUS, + ConnectStatus: ULONG, + DriverData: [UCHAR; 4], +}} +pub type PUSB_START_FAILDATA = *mut USB_START_FAILDATA; +pub const IOCTL_INTERNAL_USB_RECORD_FAILURE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_RECORD_FAILURE, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_DEVICE_HANDLE_EX: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_DEVICE_HANDLE_EX, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_TT_DEVICE_HANDLE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_TT_DEVICE_HANDLE, METHOD_NEITHER, FILE_ANY_ACCESS); +STRUCT!{struct USB_TOPOLOGY_ADDRESS { + PciBusNumber: ULONG, + PciDeviceNumber: ULONG, + PciFunctionNumber: ULONG, + Reserved: ULONG, + RootHubPortNumber: USHORT, + HubPortNumber: [USHORT; 5], + Reserved2: USHORT, +}} +pub type PUSB_TOPOLOGY_ADDRESS = *mut USB_TOPOLOGY_ADDRESS; +pub const IOCTL_INTERNAL_USB_GET_TOPOLOGY_ADDRESS: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_TOPOLOGY_ADDRESS, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_GET_DEVICE_CONFIG_INFO: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_HUB_CONFIG_INFO, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_REGISTER_COMPOSITE_DEVICE: DWORD + = CTL_CODE!(FILE_DEVICE_USBEX, USB_REGISTER_COMPOSITE_DEVICE, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_UNREGISTER_COMPOSITE_DEVICE: DWORD + = CTL_CODE!(FILE_DEVICE_USBEX, USB_UNREGISTER_COMPOSITE_DEVICE, METHOD_NEITHER, + FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_REQUEST_REMOTE_WAKE_NOTIFICATION: DWORD + = 
CTL_CODE!(FILE_DEVICE_USBEX, USB_REQUEST_REMOTE_WAKE_NOTIFICATION, METHOD_NEITHER, + FILE_ANY_ACCESS); +pub const IOCTL_INTERNAL_USB_FAIL_GET_STATUS_FROM_DEVICE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_FAIL_GET_STATUS, METHOD_NEITHER, FILE_ANY_ACCESS); +pub const IOCTL_USB_HCD_GET_STATS_1: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_GET_STATS_1, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_HCD_GET_STATS_2: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_GET_STATS_2, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_HCD_DISABLE_PORT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_DISABLE_PORT, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_HCD_ENABLE_PORT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_ENABLE_PORT, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_DIAGNOSTIC_MODE_ON: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_DIAGNOSTIC_MODE_ON, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_DIAGNOSTIC_MODE_OFF: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_DIAGNOSTIC_MODE_OFF, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_ROOT_HUB_NAME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_GET_ROOT_HUB_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_GET_HCD_DRIVERKEY_NAME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, HCD_GET_DRIVERKEY_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_INFORMATION: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_INFORMATION, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_CONNECTION_INFORMATION: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_INFORMATION, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_CONNECTION_NAME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_DIAG_IGNORE_HUBS_ON: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_DIAG_IGNORE_HUBS_ON, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_DIAG_IGNORE_HUBS_OFF: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_DIAG_IGNORE_HUBS_OFF, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_DRIVERKEY_NAME, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_HUB_CAPABILITIES: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_HUB_CAPABILITIES, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_HUB_CYCLE_PORT: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_HUB_CYCLE_PORT, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_CONNECTION_ATTRIBUTES: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_ATTRIBUTES, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_CONNECTION_INFORMATION_EX: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_INFORMATION_EX, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_RESET_HUB: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_RESET_HUB, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_HUB_CAPABILITIES_EX: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_HUB_CAPABILITIES_EX, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_HUB_INFORMATION_EX: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_HUB_INFORMATION_EX, METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_PORT_CONNECTOR_PROPERTIES: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_PORT_CONNECTOR_PROPERTIES, METHOD_BUFFERED, + 
FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_NODE_CONNECTION_INFORMATION_EX_V2: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_INFORMATION_EX_V2, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_TRANSPORT_CHARACTERISTICS: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_TRANSPORT_CHARACTERISTICS, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_REGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_REGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE, + METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_NOTIFY_ON_TRANSPORT_CHARACTERISTICS_CHANGE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_NOTIFY_ON_TRANSPORT_CHARACTERISTICS_CHANGE, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_UNREGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_UNREGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE, + METHOD_BUFFERED, FILE_ANY_ACCESS); +pub const IOCTL_USB_START_TRACKING_FOR_TIME_SYNC: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_START_TRACKING_FOR_TIME_SYNC, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_STOP_TRACKING_FOR_TIME_SYNC: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_STOP_TRACKING_FOR_TIME_SYNC, METHOD_BUFFERED, + FILE_ANY_ACCESS); +pub const IOCTL_USB_GET_DEVICE_CHARACTERISTICS: DWORD + = CTL_CODE!(FILE_DEVICE_USB, USB_GET_DEVICE_CHARACTERISTICS, METHOD_BUFFERED, FILE_ANY_ACCESS); +ENUM!{enum USB_HUB_NODE { + UsbHub, + UsbMIParent, +}} +STRUCT!{#[repr(packed)] struct USB_HUB_INFORMATION { + HubDescriptor: USB_HUB_DESCRIPTOR, + HubIsBusPowered: BOOLEAN, +}} +pub type PUSB_HUB_INFORMATION = *mut USB_HUB_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_MI_PARENT_INFORMATION { + NumberOfInterfaces: ULONG, +}} +pub type PUSB_MI_PARENT_INFORMATION = *mut USB_MI_PARENT_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_NODE_INFORMATION { + NodeType: USB_HUB_NODE, + u: USB_NODE_INFORMATION_u, +}} +UNION!{union USB_NODE_INFORMATION_u { + [u8; 72], + HubInformation HubInformation_mut: USB_HUB_INFORMATION, + MiParentInformation MiParentInformation_mut: USB_MI_PARENT_INFORMATION, +}} +pub type PUSB_NODE_INFORMATION = *mut USB_NODE_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_PIPE_INFO { + EndpointDescriptor: USB_ENDPOINT_DESCRIPTOR, + ScheduleOffset: ULONG, +}} +pub type PUSB_PIPE_INFO = *mut USB_PIPE_INFO; +ENUM!{enum USB_CONNECTION_STATUS { + NoDeviceConnected, + DeviceConnected, + DeviceFailedEnumeration, + DeviceGeneralFailure, + DeviceCausedOvercurrent, + DeviceNotEnoughPower, + DeviceNotEnoughBandwidth, + DeviceHubNestedTooDeeply, + DeviceInLegacyHub, + DeviceEnumerating, + DeviceReset, +}} +pub type PUSB_CONNECTION_STATUS = *mut USB_CONNECTION_STATUS; +STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_INFORMATION { + ConnectionIndex: ULONG, + DeviceDescriptor: USB_DEVICE_DESCRIPTOR, + CurrentConfigurationValue: UCHAR, + LowSpeed: BOOLEAN, + DeviceIsHub: BOOLEAN, + DeviceAddress: USHORT, + NumberOfOpenPipes: ULONG, + ConnectionStatus: USB_CONNECTION_STATUS, + PipeList: [USB_PIPE_INFO; 0], +}} +pub type PUSB_NODE_CONNECTION_INFORMATION = *mut USB_NODE_CONNECTION_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_DRIVERKEY_NAME { + ConnectionIndex: ULONG, + ActualLength: ULONG, + DriverKeyName: [WCHAR; 1], +}} +pub type PUSB_NODE_CONNECTION_DRIVERKEY_NAME = *mut USB_NODE_CONNECTION_DRIVERKEY_NAME; 
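The *_NAME structures in this file (USB_NODE_CONNECTION_DRIVERKEY_NAME above, USB_HUB_NAME, USB_ROOT_HUB_NAME and USB_HCD_DRIVERKEY_NAME below) all pair an ActualLength byte count with a one-element WCHAR array, and are filled in by calling DeviceIoControl twice: once with only the fixed-size header to learn ActualLength, then again with a buffer of that size. A rough sketch for the root-hub name, assuming hcd is an already-opened handle to a host-controller device and the relevant winapi features are enabled:

    use winapi::shared::minwindef::DWORD;
    use winapi::shared::usbioctl::{IOCTL_USB_GET_ROOT_HUB_NAME, USB_ROOT_HUB_NAME};
    use winapi::um::ioapiset::DeviceIoControl;
    use winapi::um::winnt::HANDLE;

    unsafe fn root_hub_name(hcd: HANDLE) -> Option<String> {
        // First call: the header-sized struct is enough to learn ActualLength.
        let mut hdr: USB_ROOT_HUB_NAME = std::mem::zeroed();
        let mut ret: DWORD = 0;
        if DeviceIoControl(
            hcd, IOCTL_USB_GET_ROOT_HUB_NAME,
            std::ptr::null_mut(), 0,
            &mut hdr as *mut _ as *mut _,
            std::mem::size_of::<USB_ROOT_HUB_NAME>() as DWORD,
            &mut ret, std::ptr::null_mut(),
        ) == 0 {
            return None;
        }
        // Second call: ActualLength covers the whole structure, name included.
        let mut buf = vec![0u16; (hdr.ActualLength as usize + 1) / 2];
        if DeviceIoControl(
            hcd, IOCTL_USB_GET_ROOT_HUB_NAME,
            std::ptr::null_mut(), 0,
            buf.as_mut_ptr() as *mut _, (buf.len() * 2) as DWORD,
            &mut ret, std::ptr::null_mut(),
        ) == 0 {
            return None;
        }
        // The struct is #[repr(packed)], so the UTF-16 name starts at byte 4,
        // i.e. element 2 of the u16 buffer.
        Some(String::from_utf16_lossy(buf.get(2..)?).trim_end_matches('\0').to_string())
    }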
+STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_NAME { + ConnectionIndex: ULONG, + ActualLength: ULONG, + NodeName: [WCHAR; 1], +}} +pub type PUSB_NODE_CONNECTION_NAME = *mut USB_NODE_CONNECTION_NAME; +STRUCT!{#[repr(packed)] struct USB_HUB_NAME { + ActualLength: ULONG, + HubName: [WCHAR; 1], +}} +pub type PUSB_HUB_NAME = *mut USB_HUB_NAME; +STRUCT!{#[repr(packed)] struct USB_ROOT_HUB_NAME { + ActualLength: ULONG, + RootHubName: [WCHAR; 1], +}} +pub type PUSB_ROOT_HUB_NAME = *mut USB_ROOT_HUB_NAME; +STRUCT!{#[repr(packed)] struct USB_HCD_DRIVERKEY_NAME { + ActualLength: ULONG, + DriverKeyName: [WCHAR; 1], +}} +pub type PUSB_HCD_DRIVERKEY_NAME = *mut USB_HCD_DRIVERKEY_NAME; +STRUCT!{#[repr(packed)] struct USB_DESCRIPTOR_REQUEST { + ConnectionIndex: ULONG, + SetupPacket: USB_DESCRIPTOR_REQUEST_SetupPacket, + Data: [UCHAR; 0], +}} +STRUCT!{#[repr(packed)] struct USB_DESCRIPTOR_REQUEST_SetupPacket { + bmRequest: UCHAR, + bRequest: UCHAR, + wValue: USHORT, + wIndex: USHORT, + wLength: USHORT, +}} +pub type PUSB_DESCRIPTOR_REQUEST = *mut USB_DESCRIPTOR_REQUEST; +STRUCT!{#[repr(packed)] struct USB_HUB_CAPABILITIES { + bitfield: ULONG, +}} +BITFIELD!{USB_HUB_CAPABILITIES bitfield: ULONG [ + HubIs2xCapable set_HubIs2xCapable[0..1], +]} +pub type PUSB_HUB_CAPABILITIES = *mut USB_HUB_CAPABILITIES; +STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_ATTRIBUTES { + ConnectionIndex: ULONG, + ConnectionStatus: USB_CONNECTION_STATUS, + PortAttributes: ULONG, +}} +pub type PUSB_NODE_CONNECTION_ATTRIBUTES = *mut USB_NODE_CONNECTION_ATTRIBUTES; +STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_INFORMATION_EX { + ConnectionIndex: ULONG, + DeviceDescriptor: USB_DEVICE_DESCRIPTOR, + CurrentConfigurationValue: UCHAR, + Speed: UCHAR, + DeviceIsHub: BOOLEAN, + DeviceAddress: USHORT, + NumberOfOpenPipes: ULONG, + ConnectionStatus: USB_CONNECTION_STATUS, + PipeList: [USB_PIPE_INFO; 0], +}} +pub type PUSB_NODE_CONNECTION_INFORMATION_EX = *mut USB_NODE_CONNECTION_INFORMATION_EX; +STRUCT!{#[repr(packed)] struct USB_HUB_CAP_FLAGS { + ul: ULONG, +}} +BITFIELD!{USB_HUB_CAP_FLAGS ul: ULONG [ + HubIsHighSpeedCapable set_HubIsHighSpeedCapable[0..1], + HubIsHighSpeed set_HubIsHighSpeed[1..2], + HubIsMultiTtCapable set_HubIsMultiTtCapable[2..3], + HubIsMultiTt set_HubIsMultiTt[3..4], + HubIsRoot set_HubIsRoot[4..5], + HubIsArmedWakeOnConnect set_HubIsArmedWakeOnConnect[5..6], + HubIsBusPowered set_HubIsBusPowered[6..7], + ReservedMBZ set_ReservedMBZ[7..32], +]} +pub type PUSB_HUB_CAP_FLAGS = *mut USB_HUB_CAP_FLAGS; +STRUCT!{#[repr(packed)] struct USB_HUB_CAPABILITIES_EX { + CapabilityFlags: USB_HUB_CAP_FLAGS, +}} +pub type PUSB_HUB_CAPABILITIES_EX = *mut USB_HUB_CAPABILITIES_EX; +STRUCT!{#[repr(packed)] struct USB_CYCLE_PORT_PARAMS { + ConnectionIndex: ULONG, + StatusReturned: ULONG, +}} +pub type PUSB_CYCLE_PORT_PARAMS = *mut USB_CYCLE_PORT_PARAMS; +STRUCT!{#[repr(packed)] struct USB_ID_STRING { + LanguageId: USHORT, + Pad: USHORT, + LengthInBytes: ULONG, + Buffer: PWCHAR, +}} +pub type PUSB_ID_STRING = *mut USB_ID_STRING; +STRUCT!{#[repr(packed)] struct USB_HUB_DEVICE_UXD_SETTINGS { + Version: ULONG, + PnpGuid: GUID, + OwnerGuid: GUID, + DeleteOnShutdown: ULONG, + DeleteOnReload: ULONG, + DeleteOnDisconnect: ULONG, + Reserved: [ULONG; 5], +}} +pub type PUSB_HUB_DEVICE_UXD_SETTINGS = *mut USB_HUB_DEVICE_UXD_SETTINGS; +STRUCT!{#[repr(packed)] struct HUB_DEVICE_CONFIG_INFO { + Version: ULONG, + Length: ULONG, + HubFlags: USB_HUB_CAP_FLAGS, + HardwareIds: USB_ID_STRING, + CompatibleIds: USB_ID_STRING, + DeviceDescription: 
USB_ID_STRING, + Reserved: [ULONG; 19], + UxdSettings: USB_HUB_DEVICE_UXD_SETTINGS, +}} +pub type PHUB_DEVICE_CONFIG_INFO = *mut HUB_DEVICE_CONFIG_INFO; +STRUCT!{#[repr(packed)] struct HCD_STAT_COUNTERS { + BytesTransferred: ULONG, + IsoMissedCount: USHORT, + DataOverrunErrorCount: USHORT, + CrcErrorCount: USHORT, + ScheduleOverrunCount: USHORT, + TimeoutErrorCount: USHORT, + InternalHcErrorCount: USHORT, + BufferOverrunErrorCount: USHORT, + SWErrorCount: USHORT, + StallPidCount: USHORT, + PortDisableCount: USHORT, +}} +pub type PHCD_STAT_COUNTERS = *mut HCD_STAT_COUNTERS; +STRUCT!{#[repr(packed)] struct HCD_ISO_STAT_COUNTERS { + LateUrbs: USHORT, + DoubleBufferedPackets: USHORT, + TransfersCF_5ms: USHORT, + TransfersCF_2ms: USHORT, + TransfersCF_1ms: USHORT, + MaxInterruptLatency: USHORT, + BadStartFrame: USHORT, + StaleUrbs: USHORT, + IsoPacketNotAccesed: USHORT, + IsoPacketHWError: USHORT, + SmallestUrbPacketCount: USHORT, + LargestUrbPacketCount: USHORT, + IsoCRC_Error: USHORT, + IsoOVERRUN_Error: USHORT, + IsoINTERNAL_Error: USHORT, + IsoUNKNOWN_Error: USHORT, + IsoBytesTransferred: ULONG, + LateMissedCount: USHORT, + HWIsoMissedCount: USHORT, + Reserved7: [ULONG; 8], +}} +pub type PHCD_ISO_STAT_COUNTERS = *mut HCD_ISO_STAT_COUNTERS; +STRUCT!{#[repr(packed)] struct HCD_STAT_INFORMATION_1 { + Reserved1: ULONG, + Reserved2: ULONG, + ResetCounters: ULONG, + TimeRead: LARGE_INTEGER, + Counters: HCD_STAT_COUNTERS, +}} +pub type PHCD_STAT_INFORMATION_1 = *mut HCD_STAT_INFORMATION_1; +STRUCT!{#[repr(packed)] struct HCD_STAT_INFORMATION_2 { + Reserved1: ULONG, + Reserved2: ULONG, + ResetCounters: ULONG, + TimeRead: LARGE_INTEGER, + LockedMemoryUsed: LONG, + Counters: HCD_STAT_COUNTERS, + IsoCounters: HCD_ISO_STAT_COUNTERS, +}} +pub type PHCD_STAT_INFORMATION_2 = *mut HCD_STAT_INFORMATION_2; +pub const WMI_USB_DRIVER_INFORMATION: ULONG = 0; +pub const WMI_USB_DRIVER_NOTIFICATION: ULONG = 1; +pub const WMI_USB_POWER_DEVICE_ENABLE: ULONG = 2; +pub const WMI_USB_HUB_NODE_INFORMATION: ULONG = 4; +pub const WMI_USB_PERFORMANCE_INFORMATION: ULONG = 1; +pub const WMI_USB_DEVICE_NODE_INFORMATION: ULONG = 2; +ENUM!{enum USB_NOTIFICATION_TYPE { + EnumerationFailure = 0, + InsufficentBandwidth, + InsufficentPower, + OverCurrent, + ResetOvercurrent, + AcquireBusInfo, + AcquireHubName, + AcquireControllerName, + HubOvercurrent, + HubPowerChange, + HubNestedTooDeeply, + ModernDeviceInLegacyHub, +}} +STRUCT!{#[repr(packed)] struct USB_NOTIFICATION { + NotificationType: USB_NOTIFICATION_TYPE, +}} +pub type PUSB_NOTIFICATION = *mut USB_NOTIFICATION; +STRUCT!{#[repr(packed)] struct USB_CONNECTION_NOTIFICATION { + NotificationType: USB_NOTIFICATION_TYPE, + ConnectionNumber: ULONG, + RequestedBandwidth: ULONG, + EnumerationFailReason: ULONG, + PowerRequested: ULONG, + HubNameLength: ULONG, +}} +pub type PUSB_CONNECTION_NOTIFICATION = *mut USB_CONNECTION_NOTIFICATION; +STRUCT!{#[repr(packed)] struct USB_BUS_NOTIFICATION { + NotificationType: USB_NOTIFICATION_TYPE, + TotalBandwidth: ULONG, + ConsumedBandwidth: ULONG, + ControllerNameLength: ULONG, +}} +pub type PUSB_BUS_NOTIFICATION = *mut USB_BUS_NOTIFICATION; +STRUCT!{#[repr(packed)] struct USB_ACQUIRE_INFO { + NotificationType: USB_NOTIFICATION_TYPE, + TotalSize: ULONG, + Buffer: [WCHAR; 1], +}} +pub type PUSB_ACQUIRE_INFO = *mut USB_ACQUIRE_INFO; +ENUM!{enum USB_WMI_DEVICE_NODE_TYPE { + UsbDevice, + HubDevice, + CompositeDevice, + UsbController, +}} +pub type PUSB_WMI_DEVICE_NODE_TYPE = *mut USB_WMI_DEVICE_NODE_TYPE; +STRUCT!{#[repr(packed)] struct 
USB_DEVICE_STATE { + bitfield: ULONG, +}} +BITFIELD!{USB_DEVICE_STATE bitfield: ULONG [ + DeviceConnected set_DeviceConnected[0..1], + DeviceStarted set_DeviceStarted[1..2], +]} +pub type PUSB_DEVICE_STATE = *mut USB_DEVICE_STATE; +STRUCT!{#[repr(packed)] struct USB_HUB_PORT_INFORMATION { + DeviceState: USB_DEVICE_STATE, + PortNumber: USHORT, + DeviceAddress: USHORT, + ConnectionIndex: ULONG, + ConnectionStatus: USB_CONNECTION_STATUS, +}} +pub type PUSB_HUB_PORT_INFORMATION = *mut USB_HUB_PORT_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_HUB_DEVICE_INFO { + HubDescriptor: USB_HUB_DESCRIPTOR, + HubNumber: ULONG, + DeviceAddress: USHORT, + HubIsSelfPowered: BOOLEAN, + HubIsRootHub: BOOLEAN, + HubCapabilities: USB_HUB_CAPABILITIES, + NumberOfHubPorts: ULONG, + PortInfo: [USB_HUB_PORT_INFORMATION; 1], +}} +pub type PUSB_HUB_DEVICE_INFO = *mut USB_HUB_DEVICE_INFO; +STRUCT!{#[repr(packed)] struct USB_COMPOSITE_FUNCTION_INFO { + FunctionNumber: UCHAR, + BaseInterfaceNumber: UCHAR, + NumberOfInterfaces: UCHAR, + FunctionIsIdle: BOOLEAN, +}} +pub type PUSB_COMPOSITE_FUNCTION_INFO = *mut USB_COMPOSITE_FUNCTION_INFO; +STRUCT!{#[repr(packed)] struct USB_COMPOSITE_DEVICE_INFO { + DeviceDescriptor: USB_DEVICE_DESCRIPTOR, + CurrentConfigDescriptor: USB_CONFIGURATION_DESCRIPTOR, + CurrentConfigurationValue: UCHAR, + NumberOfFunctions: UCHAR, + FunctionInfo: [USB_COMPOSITE_FUNCTION_INFO; 1], +}} +pub type PUSB_COMPOSITE_DEVICE_INFO = *mut USB_COMPOSITE_DEVICE_INFO; +STRUCT!{#[repr(packed)] struct USB_CONTROLLER_DEVICE_INFO { + PciVendorId: ULONG, + PciDeviceId: ULONG, + PciRevision: ULONG, + NumberOfRootPorts: ULONG, + HcFeatureFlags: ULONG, +}} +pub type PUSB_CONTROLLER_DEVICE_INFO = *mut USB_CONTROLLER_DEVICE_INFO; +STRUCT!{#[repr(packed)] struct USB_DEVICE_INFO { + DeviceState: USB_DEVICE_STATE, + PortNumber: USHORT, + DeviceDescriptor: USB_DEVICE_DESCRIPTOR, + CurrentConfigurationValue: UCHAR, + Speed: USB_DEVICE_SPEED, + DeviceAddress: USHORT, + ConnectionIndex: ULONG, + ConnectionStatus: USB_CONNECTION_STATUS, + PnpHardwareId: [WCHAR; 128], + PnpCompatibleId: [WCHAR; 128], + SerialNumberId: [WCHAR; 128], + PnpDeviceDescription: [WCHAR; 128], + NumberOfOpenPipes: ULONG, + PipeList: [USB_PIPE_INFO; 1], +}} +pub type PUSB_DEVICE_INFO = *mut USB_DEVICE_INFO; +STRUCT!{#[repr(packed)] struct USB_DEVICE_NODE_INFO { + Sig: ULONG, + LengthInBytes: ULONG, + DeviceDescription: [WCHAR; 40], + NodeType: USB_WMI_DEVICE_NODE_TYPE, + BusAddress: USB_TOPOLOGY_ADDRESS, + u: USB_DEVICE_NODE_INFO_u, +}} +UNION!{union USB_DEVICE_NODE_INFO_u { + [u8; 1078], + UsbDeviceInfo UsbDeviceInfo_mut: USB_DEVICE_INFO, + HubDeviceInfo HubDeviceInfo_mut: USB_HUB_DEVICE_INFO, + CompositeDeviceInfo CompositeDeviceInfo_mut: USB_COMPOSITE_DEVICE_INFO, + ControllerDeviceInfo ControllerDeviceInfo_mut: USB_CONTROLLER_DEVICE_INFO, + DeviceInformation DeviceInformation_mut: [UCHAR; 4], +}} +pub type PUSB_DEVICE_NODE_INFO = *mut USB_DEVICE_NODE_INFO; +STRUCT!{#[repr(packed)] struct USB_DEVICE_PERFORMANCE_INFO { + BulkBytes: ULONG, + ControlDataBytes: ULONG, + IsoBytes: ULONG, + InterruptBytes: ULONG, + BulkUrbCount: ULONG, + ControlUrbCount: ULONG, + IsoUrbCount: ULONG, + InterruptUrbCount: ULONG, + AllocedInterrupt: [ULONG; 6], + AllocedIso: ULONG, + Total32secBandwidth: ULONG, + TotalTtBandwidth: ULONG, + DeviceDescription: [WCHAR; 60], + DeviceSpeed: USB_DEVICE_SPEED, + TotalIsoLatency: ULONG, + DroppedIsoPackets: ULONG, + TransferErrors: ULONG, + PciInterruptCount: ULONG, + HcIdleState: ULONG, + HcAsyncIdleState: ULONG, + 
HcAsyncCacheFlushCount: ULONG, + HcPeriodicIdleState: ULONG, + HcPeriodicCacheFlushCount: ULONG, +}} +pub type PUSB_DEVICE_PERFORMANCE_INFO = *mut USB_DEVICE_PERFORMANCE_INFO; +ENUM!{enum USB_HUB_TYPE { + UsbRootHub = 1, + Usb20Hub = 2, + Usb30Hub = 3, +}} +STRUCT!{#[repr(packed)] struct USB_HUB_INFORMATION_EX { + HubType: USB_HUB_TYPE, + HighestPortNumber: USHORT, + u: USB_HUB_INFORMATION_EX_u, +}} +UNION!{union USB_HUB_INFORMATION_EX_u { + [u8; 71], + UsbHubDescriptor UsbHubDescriptor_mut: USB_HUB_DESCRIPTOR, + Usb30HubDescriptor Usb30HubDescriptor_mut: USB_30_HUB_DESCRIPTOR, +}} +pub type PUSB_HUB_INFORMATION_EX = *mut USB_HUB_INFORMATION_EX; +STRUCT!{#[repr(packed)] struct USB_PORT_PROPERTIES { + ul: ULONG, +}} +BITFIELD!{USB_PORT_PROPERTIES ul: ULONG [ + PortIsUserConnectable set_PortIsUserConnectable[0..1], + PortIsDebugCapable set_PortIsDebugCapable[1..2], + PortHasMultipleCompanions set_PortHasMultipleCompanions[2..3], + PortConnectorIsTypeC set_PortConnectorIsTypeC[3..4], + ReservedMBZ set_ReservedMBZ[4..32], +]} +pub type PUSB_PORT_PROPERTIES = *mut USB_PORT_PROPERTIES; +STRUCT!{#[repr(packed)] struct USB_PORT_CONNECTOR_PROPERTIES { + ConnectionIndex: ULONG, + ActualLength: ULONG, + UsbPortProperties: USB_PORT_PROPERTIES, + CompanionIndex: USHORT, + CompanionPortNumber: USHORT, + CompanionHubSymbolicLinkName: [WCHAR; 1], +}} +pub type PUSB_PORT_CONNECTOR_PROPERTIES = *mut USB_PORT_CONNECTOR_PROPERTIES; +STRUCT!{#[repr(packed)] struct USB_PROTOCOLS { + ul: ULONG, +}} +BITFIELD!{USB_PROTOCOLS ul: ULONG [ + Usb110 set_Usb110[0..1], + Usb200 set_Usb200[1..2], + Usb300 set_Usb300[2..3], + ReservedMBZ set_ReservedMBZ[3..32], +]} +pub type PUSB_PROTOCOLS = *mut USB_PROTOCOLS; +STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_INFORMATION_EX_V2_FLAGS { + ul: ULONG, +}} +BITFIELD!{USB_NODE_CONNECTION_INFORMATION_EX_V2_FLAGS ul: ULONG [ + DeviceIsOperatingAtSuperSpeedOrHigher set_DeviceIsOperatingAtSuperSpeedOrHigher[0..1], + DeviceIsSuperSpeedCapableOrHigher set_DeviceIsSuperSpeedCapableOrHigher[1..2], + DeviceIsOperatingAtSuperSpeedPlusOrHigher set_DeviceIsOperatingAtSuperSpeedPlusOrHigher[2..3], + DeviceIsSuperSpeedPlusCapableOrHigher set_DeviceIsSuperSpeedPlusCapableOrHigher[3..4], + ReservedMBZ set_ReservedMBZ[4..32], +]} +pub type PUSB_NODE_CONNECTION_INFORMATION_EX_V2_FLAGS + = *mut USB_NODE_CONNECTION_INFORMATION_EX_V2_FLAGS; +STRUCT!{#[repr(packed)] struct USB_NODE_CONNECTION_INFORMATION_EX_V2 { + ConnectionIndex: ULONG, + Length: ULONG, + SupportedUsbProtocols: USB_PROTOCOLS, + Flags: USB_NODE_CONNECTION_INFORMATION_EX_V2_FLAGS, +}} +pub type PUSB_NODE_CONNECTION_INFORMATION_EX_V2 = *mut USB_NODE_CONNECTION_INFORMATION_EX_V2; +pub const USB_TRANSPORT_CHARACTERISTICS_VERSION_1: ULONG = 0x01; +pub const USB_TRANSPORT_CHARACTERISTICS_LATENCY_AVAILABLE: ULONG = 0x1; +pub const USB_TRANSPORT_CHARACTERISTICS_BANDWIDTH_AVAILABLE: ULONG = 0x2; +STRUCT!{#[repr(packed)] struct USB_TRANSPORT_CHARACTERISTICS { + Version: ULONG, + TransportCharacteristicsFlags: ULONG, + CurrentRoundtripLatencyInMilliSeconds: ULONG64, + MaxPotentialBandwidth: ULONG64, +}} +pub type PUSB_TRANSPORT_CHARACTERISTICS = *mut USB_TRANSPORT_CHARACTERISTICS; +pub const USB_REGISTER_FOR_TRANSPORT_LATENCY_CHANGE: ULONG = 0x1; +pub const USB_REGISTER_FOR_TRANSPORT_BANDWIDTH_CHANGE: ULONG = 0x2; +DECLARE_HANDLE!(USB_CHANGE_REGISTRATION_HANDLE, USB_CHANGE_REGISTRATION_HANDLE__); +STRUCT!{#[repr(packed)] struct USB_TRANSPORT_CHARACTERISTICS_CHANGE_REGISTRATION { + ChangeNotificationInputFlags: ULONG, + Handle: 
USB_CHANGE_REGISTRATION_HANDLE, + UsbTransportCharacteristics: USB_TRANSPORT_CHARACTERISTICS, +}} +pub type PUSB_TRANSPORT_CHARACTERISTICS_CHANGE_REGISTRATION + = *mut USB_TRANSPORT_CHARACTERISTICS_CHANGE_REGISTRATION; +STRUCT!{#[repr(packed)] struct USB_TRANSPORT_CHARACTERISTICS_CHANGE_NOTIFICATION { + Handle: USB_CHANGE_REGISTRATION_HANDLE, + UsbTransportCharacteristics: USB_TRANSPORT_CHARACTERISTICS, +}} +pub type PUSB_TRANSPORT_CHARACTERISTICS_CHANGE_NOTIFICATION + = *mut USB_TRANSPORT_CHARACTERISTICS_CHANGE_NOTIFICATION; +STRUCT!{#[repr(packed)] struct USB_TRANSPORT_CHARACTERISTICS_CHANGE_UNREGISTRATION { + Handle: USB_CHANGE_REGISTRATION_HANDLE, +}} +pub type PUSB_TRANSPORT_CHARACTERISTICS_CHANGE_UNREGISTRATION + = *mut USB_TRANSPORT_CHARACTERISTICS_CHANGE_UNREGISTRATION; +pub const USB_DEVICE_CHARACTERISTICS_VERSION_1: ULONG = 0x01; +pub const USB_DEVICE_CHARACTERISTICS_MAXIMUM_PATH_DELAYS_AVAILABLE: ULONG = 0x1; +STRUCT!{#[repr(packed)] struct USB_DEVICE_CHARACTERISTICS { + Version: ULONG, + Reserved: [ULONG; 2], + UsbDeviceCharacteristicsFlags: ULONG, + MaximumSendPathDelayInMilliSeconds: ULONG, + MaximumCompletionPathDelayInMilliSeconds: ULONG, +}} +pub type PUSB_DEVICE_CHARACTERISTICS = *mut USB_DEVICE_CHARACTERISTICS; +STRUCT!{#[repr(packed)] struct USB_START_TRACKING_FOR_TIME_SYNC_INFORMATION { + TimeTrackingHandle: HANDLE, + IsStartupDelayTolerable: BOOLEAN, +}} +pub type PUSB_START_TRACKING_FOR_TIME_SYNC_INFORMATION + = *mut USB_START_TRACKING_FOR_TIME_SYNC_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_STOP_TRACKING_FOR_TIME_SYNC_INFORMATION { + TimeTrackingHandle: HANDLE, +}} +pub type PUSB_STOP_TRACKING_FOR_TIME_SYNC_INFORMATION + = *mut USB_STOP_TRACKING_FOR_TIME_SYNC_INFORMATION; +STRUCT!{#[repr(packed)] struct USB_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC_INFORMATION { + TimeTrackingHandle: HANDLE, + InputFrameNumber: ULONG, + InputMicroFrameNumber: ULONG, + QueryPerformanceCounterAtInputFrameOrMicroFrame: LARGE_INTEGER, + QueryPerformanceCounterFrequency: LARGE_INTEGER, + PredictedAccuracyInMicroSeconds: ULONG, + CurrentGenerationID: ULONG, + CurrentQueryPerformanceCounter: LARGE_INTEGER, + CurrentHardwareFrameNumber: ULONG, + CurrentHardwareMicroFrameNumber: ULONG, + CurrentUSBFrameNumber: ULONG, +}} +pub type PUSB_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC_INFORMATION + = *mut USB_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC_INFORMATION; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/usbiodef.rs cargo-0.47.0/vendor/winapi/src/shared/usbiodef.rs --- cargo-0.44.1/vendor/winapi/src/shared/usbiodef.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/usbiodef.rs 2020-10-01 21:38:28.000000000 +0000 @@ -63,6 +63,14 @@ pub const USB_GET_HUB_INFORMATION_EX: ULONG = 277; pub const USB_GET_PORT_CONNECTOR_PROPERTIES: ULONG = 278; pub const USB_GET_NODE_CONNECTION_INFORMATION_EX_V2: ULONG = 279; +pub const USB_GET_TRANSPORT_CHARACTERISTICS: ULONG = 281; +pub const USB_REGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE: ULONG = 282; +pub const USB_NOTIFY_ON_TRANSPORT_CHARACTERISTICS_CHANGE: ULONG = 283; +pub const USB_UNREGISTER_FOR_TRANSPORT_CHARACTERISTICS_CHANGE: ULONG = 284; +pub const USB_START_TRACKING_FOR_TIME_SYNC: ULONG = 285; +pub const USB_GET_FRAME_NUMBER_AND_QPC_FOR_TIME_SYNC: ULONG = 286; +pub const USB_STOP_TRACKING_FOR_TIME_SYNC: ULONG = 287; +pub const USB_GET_DEVICE_CHARACTERISTICS: ULONG = 288; DEFINE_GUID!{GUID_DEVINTERFACE_USB_HUB, 0xf18a0e88, 0xc30c, 0x11d0, 0x88, 0x15, 0x00, 0xa0, 0xc9, 0x06, 0xbe, 0xd8} DEFINE_GUID!{GUID_DEVINTERFACE_USB_DEVICE, diff 
-Nru cargo-0.44.1/vendor/winapi/src/shared/usbscan.rs cargo-0.47.0/vendor/winapi/src/shared/usbscan.rs --- cargo-0.44.1/vendor/winapi/src/shared/usbscan.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/usbscan.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,21 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms +use shared::ntdef::{ULONG, USHORT}; +use um::winioctl::{FILE_ANY_ACCESS, METHOD_BUFFERED}; +//98 +STRUCT!{struct DEVICE_DESCRIPTOR { + usVendorId: USHORT, + usProductId: USHORT, + usBcdDevice: USHORT, + usLanguageId: USHORT, +}} +pub type PDEVICE_DESCRIPTOR = *mut DEVICE_DESCRIPTOR; +//132 +pub const FILE_DEVICE_USB_SCAN: ULONG = 0x8000; +pub const IOCTL_INDEX: ULONG = 0x0800; +//143 +pub const IOCTL_GET_USB_DESCRIPTOR: ULONG + = CTL_CODE!(FILE_DEVICE_USB_SCAN, IOCTL_INDEX + 8, METHOD_BUFFERED, FILE_ANY_ACCESS); diff -Nru cargo-0.44.1/vendor/winapi/src/shared/windot11.rs cargo-0.47.0/vendor/winapi/src/shared/windot11.rs --- cargo-0.44.1/vendor/winapi/src/shared/windot11.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/windot11.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,2853 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Definitions for native 802.11 miniport driver specifications. +use shared::basetsd::UINT8; +use shared::minwindef::{UCHAR, ULONG, USHORT}; +use shared::ntddndis::{ + NDIS_OBJECT_HEADER, NDIS_PACKET_TYPE_ALL_MULTICAST, NDIS_PACKET_TYPE_BROADCAST, + NDIS_PACKET_TYPE_DIRECTED, NDIS_PACKET_TYPE_MULTICAST, NDIS_PACKET_TYPE_PROMISCUOUS, + NDIS_STATUS, +}; +use shared::wlantypes::{ + DOT11_AUTH_ALGORITHM, DOT11_AUTH_CIPHER_PAIR, DOT11_BSS_TYPE, DOT11_CIPHER_ALGORITHM, + DOT11_SSID, PDOT11_AUTH_CIPHER_PAIR, +}; +use um::winnt::{BOOLEAN, HANDLE, LARGE_INTEGER, LONG, PVOID, ULONGLONG}; +pub type DOT11_MAC_ADDRESS = [UCHAR; 6]; +pub type PDOT11_MAC_ADDRESS = *mut DOT11_MAC_ADDRESS; +STRUCT!{struct DOT11_BSSID_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + BSSIDs: [DOT11_MAC_ADDRESS; 1], +}} +pub type PDOT11_BSSID_LIST = *mut DOT11_BSSID_LIST; +pub const DOT11_BSSID_LIST_REVISION_1: UCHAR = 1; +pub const DOT11_HESSID_LENGTH: usize = 6; +pub type DOT11_HESSID = [UCHAR; DOT11_HESSID_LENGTH]; +pub type PDOT11_HESSID = *mut DOT11_HESSID; +ENUM!{enum DOT11_PHY_TYPE { + dot11_phy_type_unknown = 0, + dot11_phy_type_any = dot11_phy_type_unknown, + dot11_phy_type_fhss = 1, + dot11_phy_type_dsss = 2, + dot11_phy_type_irbaseband = 3, + dot11_phy_type_ofdm = 4, + dot11_phy_type_hrdsss = 5, + dot11_phy_type_erp = 6, + dot11_phy_type_ht = 7, + dot11_phy_type_vht = 8, + dot11_phy_type_dmg = 9, + dot11_phy_type_IHV_start = 0x80000000, + dot11_phy_type_IHV_end = 0xffffffff, +}} +pub type PDOT11_PHY_TYPE = *mut DOT11_PHY_TYPE; +pub const DOT11_RATE_SET_MAX_LENGTH: usize = 126; +STRUCT!{struct DOT11_RATE_SET { + uRateSetLength: ULONG, + ucRateSet: [UCHAR; DOT11_RATE_SET_MAX_LENGTH], +}} +pub type PDOT11_RATE_SET = *mut DOT11_RATE_SET; +pub type DOT11_COUNTRY_OR_REGION_STRING = [UCHAR; 3]; +pub type PDOT11_COUNTRY_OR_REGION_STRING = *mut DOT11_COUNTRY_OR_REGION_STRING; +pub type DOT11_DIALOG_TOKEN = UCHAR; +pub type 
DOT11_WFD_STATUS_CODE = UCHAR; +pub type DOT11_WFD_MINOR_REASON_CODE = UCHAR; +pub type DOT11_WFD_SERVICE_HASH = [UCHAR; 6]; +pub const DOT11_WFD_SERVICE_NAME_MAX_LENGTH: usize = 255; +pub const DOT11_WFD_APS2_SERVICE_TYPE_MAX_LENGTH: usize = 21; +pub const DOT11_WFD_ASP2_INSTANCE_NAME_MAX_LENGTH: usize = 63; +pub const DOT11_WFD_SERVICE_INFORMATION_MAX_LENGTH: usize = 65535; +pub const DOT11_MAX_REQUESTED_SERVICE_INFORMATION_LENGTH: usize = 255; +pub const DOT11_WFD_SESSION_INFO_MAX_LENGTH: usize = 144; +STRUCT!{struct DOT11_WFD_SESSION_INFO { + uSessionInfoLength: USHORT, + ucSessionInfo: [UCHAR; DOT11_WFD_SESSION_INFO_MAX_LENGTH], +}} +pub type PDOT11_WFD_SESSION_INFO = *mut DOT11_WFD_SESSION_INFO; +pub const NDIS_PACKET_TYPE_MEDIA_SPECIFIC_MASK: u32 = 0x0fff0000; +pub const NDIS_PACKET_TYPE_802_11_DIRECTED_DATA: u32 = NDIS_PACKET_TYPE_DIRECTED; +pub const NDIS_PACKET_TYPE_802_11_BROADCAST_DATA: u32 = NDIS_PACKET_TYPE_BROADCAST; +pub const NDIS_PACKET_TYPE_802_11_MULTICAST_DATA: u32 = NDIS_PACKET_TYPE_MULTICAST; +pub const NDIS_PACKET_TYPE_802_11_ALL_MULTICAST_DATA: u32 = NDIS_PACKET_TYPE_ALL_MULTICAST; +pub const NDIS_PACKET_TYPE_802_11_PROMISCUOUS_DATA: u32 = NDIS_PACKET_TYPE_PROMISCUOUS; +pub const NDIS_PACKET_TYPE_802_11_RAW_DATA: u32 = 0x00010000; +pub const NDIS_PACKET_TYPE_802_11_DIRECTED_MGMT: u32 = 0x00020000; +pub const NDIS_PACKET_TYPE_802_11_BROADCAST_MGMT: u32 = 0x00040000; +pub const NDIS_PACKET_TYPE_802_11_MULTICAST_MGMT: u32 = 0x00080000; +pub const NDIS_PACKET_TYPE_802_11_ALL_MULTICAST_MGMT: u32 = 0x00100000; +pub const NDIS_PACKET_TYPE_802_11_PROMISCUOUS_MGMT: u32 = 0x00200000; +pub const NDIS_PACKET_TYPE_802_11_RAW_MGMT: u32 = 0x00400000; +pub const NDIS_PACKET_TYPE_802_11_DIRECTED_CTRL: u32 = 0x00800000; +pub const NDIS_PACKET_TYPE_802_11_BROADCAST_CTRL: u32 = 0x01000000; +pub const NDIS_PACKET_TYPE_802_11_PROMISCUOUS_CTRL: u32 = 0x02000000; +pub const NDIS_PACKET_TYPE_ALL_802_11_FILTERS: u32 = NDIS_PACKET_TYPE_DIRECTED + | NDIS_PACKET_TYPE_MULTICAST | NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_BROADCAST + | NDIS_PACKET_TYPE_PROMISCUOUS | NDIS_PACKET_TYPE_802_11_RAW_DATA + | NDIS_PACKET_TYPE_802_11_DIRECTED_MGMT | NDIS_PACKET_TYPE_802_11_BROADCAST_MGMT + | NDIS_PACKET_TYPE_802_11_MULTICAST_MGMT | NDIS_PACKET_TYPE_802_11_ALL_MULTICAST_MGMT + | NDIS_PACKET_TYPE_802_11_PROMISCUOUS_MGMT | NDIS_PACKET_TYPE_802_11_RAW_MGMT + | NDIS_PACKET_TYPE_802_11_DIRECTED_CTRL | NDIS_PACKET_TYPE_802_11_BROADCAST_CTRL + | NDIS_PACKET_TYPE_802_11_PROMISCUOUS_CTRL; +pub const DOT11_MAX_PDU_SIZE: i32 = 2346; +pub const DOT11_MIN_PDU_SIZE: i32 = 256; +pub const DOT11_MAX_NUM_DEFAULT_KEY: i32 = 4; +pub const DOT11_MAX_NUM_DEFAULT_KEY_MFP: i32 = DOT11_MAX_NUM_DEFAULT_KEY + 2; +pub const OID_DOT11_NDIS_START: u32 = 0x0D010300; +pub const NWF_MANDATORY_OID: u32 = 0x01; +pub const NWF_OPTIONAL_OID: u32 = 0x02; +pub const NWF_OPERATIONAL_OID: u32 = 0x01; +pub const NWF_STATISTICS_OID: u32 = 0x02; +#[inline] +pub fn NWF_DEFINE_OID(Seq: u32, o: u32, m: u32) -> u32 { + 0x0E000000 | (o << 16) | (m << 8) | Seq +} +macro_rules! 
NWF_DEFINE_OID { + ($Seq:expr, $o:expr, $m:expr) => { 0x0E000000 | $o << 16 | $m << 8 | $Seq }; +} +pub const OID_DOT11_OFFLOAD_CAPABILITY: u32 = OID_DOT11_NDIS_START + 0; +pub const DOT11_HW_WEP_SUPPORTED_TX: u32 = 0x00000001; +pub const DOT11_HW_WEP_SUPPORTED_RX: u32 = 0x00000002; +pub const DOT11_HW_FRAGMENTATION_SUPPORTED: u32 = 0x00000004; +pub const DOT11_HW_DEFRAGMENTATION_SUPPORTED: u32 = 0x00000008; +pub const DOT11_HW_MSDU_AUTH_SUPPORTED_TX: u32 = 0x00000010; +pub const DOT11_HW_MSDU_AUTH_SUPPORTED_RX: u32 = 0x00000020; +pub const DOT11_CONF_ALGO_WEP_RC4: u32 = 0x00000001; +pub const DOT11_CONF_ALGO_TKIP: u32 = 0x00000002; +pub const DOT11_AUTH_ALGO_MICHAEL: u32 = 0x00000001; +STRUCT!{struct DOT11_OFFLOAD_CAPABILITY { + uReserved: ULONG, + uFlags: ULONG, + uSupportedWEPAlgorithms: ULONG, + uNumOfReplayWindows: ULONG, + uMaxWEPKeyMappingLength: ULONG, + uSupportedAuthAlgorithms: ULONG, + uMaxAuthKeyMappingLength: ULONG, +}} +pub type PDOT11_OFFLOAD_CAPABILITY = *mut DOT11_OFFLOAD_CAPABILITY; +pub const OID_DOT11_CURRENT_OFFLOAD_CAPABILITY: u32 = OID_DOT11_NDIS_START + 1; +STRUCT!{struct DOT11_CURRENT_OFFLOAD_CAPABILITY { + uReserved: ULONG, + uFlags: ULONG, +}} +pub type PDOT11_CURRENT_OFFLOAD_CAPABILITY = *mut DOT11_CURRENT_OFFLOAD_CAPABILITY; +pub const OID_DOT11_WEP_OFFLOAD: u32 = OID_DOT11_NDIS_START + 2; +ENUM!{enum DOT11_OFFLOAD_TYPE { + dot11_offload_type_wep = 1, + dot11_offload_type_auth = 2, +}} +pub type PDOT11_OFFLOAD_TYPE = *mut DOT11_OFFLOAD_TYPE; +STRUCT!{struct DOT11_IV48_COUNTER { + uIV32Counter: ULONG, + usIV16Counter: USHORT, +}} +pub type PDOT11_IV48_COUNTER = *mut DOT11_IV48_COUNTER; +STRUCT!{struct DOT11_WEP_OFFLOAD { + uReserved: ULONG, + hOffloadContext: HANDLE, + hOffload: HANDLE, + dot11OffloadType: DOT11_OFFLOAD_TYPE, + dwAlgorithm: ULONG, + bRowIsOutbound: BOOLEAN, + bUseDefault: BOOLEAN, + uFlags: ULONG, + ucMacAddress: [UCHAR; 6], + uNumOfRWsOnPeer: ULONG, + uNumOfRWsOnMe: ULONG, + dot11IV48Counters: [DOT11_IV48_COUNTER; 16], + usDot11RWBitMaps: [USHORT; 16], + usKeyLength: USHORT, + ucKey: [UCHAR; 1], +}} +pub type PDOT11_WEP_OFFLOAD = *mut DOT11_WEP_OFFLOAD; +pub const OID_DOT11_WEP_UPLOAD: u32 = OID_DOT11_NDIS_START + 3; +STRUCT!{struct DOT11_WEP_UPLOAD { + uReserved: ULONG, + dot11OffloadType: DOT11_OFFLOAD_TYPE, + hOffload: HANDLE, + uNumOfRWsUsed: ULONG, + dot11IV48Counters: [DOT11_IV48_COUNTER; 16], + usDot11RWBitMaps: [USHORT; 16], +}} +pub type PDOT11_WEP_UPLOAD = *mut DOT11_WEP_UPLOAD; +pub const OID_DOT11_DEFAULT_WEP_OFFLOAD: u32 = OID_DOT11_NDIS_START + 4; +ENUM!{enum DOT11_KEY_DIRECTION { + dot11_key_direction_both = 1, + dot11_key_direction_inbound = 2, + dot11_key_direction_outbound = 3, +}} +pub type PDOT11_KEY_DIRECTION = *mut DOT11_KEY_DIRECTION; +STRUCT!{struct DOT11_DEFAULT_WEP_OFFLOAD { + uReserved: ULONG, + hOffloadContext: HANDLE, + hOffload: HANDLE, + dwIndex: ULONG, + dot11OffloadType: DOT11_OFFLOAD_TYPE, + dwAlgorithm: ULONG, + uFlags: ULONG, + dot11KeyDirection: DOT11_KEY_DIRECTION, + ucMacAddress: [UCHAR; 6], + uNumOfRWsOnMe: ULONG, + dot11IV48Counters: [DOT11_IV48_COUNTER; 16], + usDot11RWBitMaps: [USHORT; 16], + usKeyLength: USHORT, + ucKey: [UCHAR; 1], +}} +pub type PDOT11_DEFAULT_WEP_OFFLOAD = *mut DOT11_DEFAULT_WEP_OFFLOAD; +pub const OID_DOT11_DEFAULT_WEP_UPLOAD: u32 = OID_DOT11_NDIS_START + 5; +STRUCT!{struct DOT11_DEFAULT_WEP_UPLOAD { + uReserved: ULONG, + dot11OffloadType: DOT11_OFFLOAD_TYPE, + hOffload: HANDLE, + uNumOfRWsUsed: ULONG, + dot11IV48Counters: [DOT11_IV48_COUNTER; 16], + usDot11RWBitMaps: [USHORT; 16], 
+}} +pub type PDOT11_DEFAULT_WEP_UPLOAD = *mut DOT11_DEFAULT_WEP_UPLOAD; +pub const OID_DOT11_MPDU_MAX_LENGTH: u32 = OID_DOT11_NDIS_START + 6; +pub const OID_DOT11_OPERATION_MODE_CAPABILITY: u32 = OID_DOT11_NDIS_START + 7; +pub const DOT11_OPERATION_MODE_UNKNOWN: ULONG = 0x00000000; +pub const DOT11_OPERATION_MODE_STATION: ULONG = 0x00000001; +pub const DOT11_OPERATION_MODE_AP: ULONG = 0x00000002; +pub const DOT11_OPERATION_MODE_EXTENSIBLE_STATION: ULONG = 0x00000004; +pub const DOT11_OPERATION_MODE_EXTENSIBLE_AP: ULONG = 0x00000008; +pub const DOT11_OPERATION_MODE_WFD_DEVICE: ULONG = 0x00000010; +pub const DOT11_OPERATION_MODE_WFD_GROUP_OWNER: ULONG = 0x00000020; +pub const DOT11_OPERATION_MODE_WFD_CLIENT: ULONG = 0x00000040; +pub const DOT11_OPERATION_MODE_MANUFACTURING: ULONG = 0x40000000; +pub const DOT11_OPERATION_MODE_NETWORK_MONITOR: ULONG = 0x80000000; +STRUCT!{struct DOT11_OPERATION_MODE_CAPABILITY { + uReserved: ULONG, + uMajorVersion: ULONG, + uMinorVersion: ULONG, + uNumOfTXBuffers: ULONG, + uNumOfRXBuffers: ULONG, + uOpModeCapability: ULONG, +}} +pub type PDOT11_OPERATION_MODE_CAPABILITY = *mut DOT11_OPERATION_MODE_CAPABILITY; +pub const OID_DOT11_CURRENT_OPERATION_MODE: u32 = OID_DOT11_NDIS_START + 8; +STRUCT!{struct DOT11_CURRENT_OPERATION_MODE { + uReserved: ULONG, + uCurrentOpMode: ULONG, +}} +pub type PDOT11_CURRENT_OPERATION_MODE = *mut DOT11_CURRENT_OPERATION_MODE; +pub const OID_DOT11_CURRENT_PACKET_FILTER: u32 = OID_DOT11_NDIS_START + 9; +pub const DOT11_PACKET_TYPE_DIRECTED_CTRL: u32 = 0x00000001; +pub const DOT11_PACKET_TYPE_DIRECTED_MGMT: u32 = 0x00000002; +pub const DOT11_PACKET_TYPE_DIRECTED_DATA: u32 = 0x00000004; +pub const DOT11_PACKET_TYPE_MULTICAST_CTRL: u32 = 0x00000008; +pub const DOT11_PACKET_TYPE_MULTICAST_MGMT: u32 = 0x00000010; +pub const DOT11_PACKET_TYPE_MULTICAST_DATA: u32 = 0x00000020; +pub const DOT11_PACKET_TYPE_BROADCAST_CTRL: u32 = 0x00000040; +pub const DOT11_PACKET_TYPE_BROADCAST_MGMT: u32 = 0x00000080; +pub const DOT11_PACKET_TYPE_BROADCAST_DATA: u32 = 0x00000100; +pub const DOT11_PACKET_TYPE_PROMISCUOUS_CTRL: u32 = 0x00000200; +pub const DOT11_PACKET_TYPE_PROMISCUOUS_MGMT: u32 = 0x00000400; +pub const DOT11_PACKET_TYPE_PROMISCUOUS_DATA: u32 = 0x00000800; +pub const DOT11_PACKET_TYPE_ALL_MULTICAST_CTRL: u32 = 0x00001000; +pub const DOT11_PACKET_TYPE_ALL_MULTICAST_MGMT: u32 = 0x00002000; +pub const DOT11_PACKET_TYPE_ALL_MULTICAST_DATA: u32 = 0x00004000; +pub const DOT11_PACKET_TYPE_RESERVED: u32 = !(DOT11_PACKET_TYPE_DIRECTED_CTRL + | DOT11_PACKET_TYPE_DIRECTED_MGMT | DOT11_PACKET_TYPE_DIRECTED_DATA + | DOT11_PACKET_TYPE_MULTICAST_CTRL | DOT11_PACKET_TYPE_MULTICAST_MGMT + | DOT11_PACKET_TYPE_MULTICAST_DATA | DOT11_PACKET_TYPE_BROADCAST_CTRL + | DOT11_PACKET_TYPE_BROADCAST_MGMT | DOT11_PACKET_TYPE_BROADCAST_DATA + | DOT11_PACKET_TYPE_PROMISCUOUS_CTRL | DOT11_PACKET_TYPE_PROMISCUOUS_MGMT + | DOT11_PACKET_TYPE_PROMISCUOUS_DATA | DOT11_PACKET_TYPE_ALL_MULTICAST_CTRL + | DOT11_PACKET_TYPE_ALL_MULTICAST_MGMT | DOT11_PACKET_TYPE_ALL_MULTICAST_DATA | 0); +pub const OID_DOT11_ATIM_WINDOW: u32 = OID_DOT11_NDIS_START + 10; +pub const OID_DOT11_SCAN_REQUEST: u32 = OID_DOT11_NDIS_START + 11; +ENUM!{enum DOT11_SCAN_TYPE { + dot11_scan_type_active = 1, + dot11_scan_type_passive = 2, + dot11_scan_type_auto = 3, + dot11_scan_type_forced = 0x80000000, +}} +pub type PDOT11_SCAN_TYPE = *mut DOT11_SCAN_TYPE; +STRUCT!{struct DOT11_SCAN_REQUEST { + dot11BSSType: DOT11_BSS_TYPE, + dot11BSSID: DOT11_MAC_ADDRESS, + dot11SSID: DOT11_SSID, + dot11ScanType: 
DOT11_SCAN_TYPE, + bRestrictedScan: BOOLEAN, + bUseRequestIE: BOOLEAN, + uRequestIDsOffset: ULONG, + uNumOfRequestIDs: ULONG, + uPhyTypesOffset: ULONG, + uNumOfPhyTypes: ULONG, + uIEsOffset: ULONG, + uIEsLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_SCAN_REQUEST = *mut DOT11_SCAN_REQUEST; +ENUM!{enum CH_DESCRIPTION_TYPE { + ch_description_type_logical = 1, + ch_description_type_center_frequency = 2, + ch_description_type_phy_specific = 3, +}} +pub type PCH_DESCRIPTION_TYPE = *mut CH_DESCRIPTION_TYPE; +STRUCT!{struct DOT11_PHY_TYPE_INFO { + dot11PhyType: DOT11_PHY_TYPE, + bUseParameters: BOOLEAN, + uProbeDelay: ULONG, + uMinChannelTime: ULONG, + uMaxChannelTime: ULONG, + ChDescriptionType: CH_DESCRIPTION_TYPE, + uChannelListSize: ULONG, + ucChannelListBuffer: [UCHAR; 1], +}} +pub type PDOT11_PHY_TYPE_INFO = *mut DOT11_PHY_TYPE_INFO; +STRUCT!{struct DOT11_SCAN_REQUEST_V2 { + dot11BSSType: DOT11_BSS_TYPE, + dot11BSSID: DOT11_MAC_ADDRESS, + dot11ScanType: DOT11_SCAN_TYPE, + bRestrictedScan: BOOLEAN, + udot11SSIDsOffset: ULONG, + uNumOfdot11SSIDs: ULONG, + bUseRequestIE: BOOLEAN, + uRequestIDsOffset: ULONG, + uNumOfRequestIDs: ULONG, + uPhyTypeInfosOffset: ULONG, + uNumOfPhyTypeInfos: ULONG, + uIEsOffset: ULONG, + uIEsLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_SCAN_REQUEST_V2 = *mut DOT11_SCAN_REQUEST_V2; +pub const OID_DOT11_CURRENT_PHY_TYPE: u32 = OID_DOT11_NDIS_START + 12; +STRUCT!{struct DOT11_PHY_TYPE_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11PhyType: [DOT11_PHY_TYPE; 1], +}} +pub type PDOT11_PHY_TYPE_LIST = *mut DOT11_PHY_TYPE_LIST; +pub const DOT11_PHY_TYPE_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_JOIN_REQUEST: u32 = OID_DOT11_NDIS_START + 13; +pub const DOT11_CAPABILITY_INFO_ESS: USHORT = 0x0001; +pub const DOT11_CAPABILITY_INFO_IBSS: USHORT = 0x0002; +pub const DOT11_CAPABILITY_INFO_CF_POLLABLE: USHORT = 0x0004; +pub const DOT11_CAPABILITY_INFO_CF_POLL_REQ: USHORT = 0x0008; +pub const DOT11_CAPABILITY_INFO_PRIVACY: USHORT = 0x0010; +pub const DOT11_CAPABILITY_SHORT_PREAMBLE: USHORT = 0x0020; +pub const DOT11_CAPABILITY_PBCC: USHORT = 0x0040; +pub const DOT11_CAPABILITY_CHANNEL_AGILITY: USHORT = 0x0080; +pub const DOT11_CAPABILITY_SHORT_SLOT_TIME: USHORT = 0x0400; +pub const DOT11_CAPABILITY_DSSSOFDM: USHORT = 0x2000; +STRUCT!{struct DOT11_BSS_DESCRIPTION { + uReserved: ULONG, + dot11BSSID: DOT11_MAC_ADDRESS, + dot11BSSType: DOT11_BSS_TYPE, + usBeaconPeriod: USHORT, + ullTimestamp: ULONGLONG, + usCapabilityInformation: USHORT, + uBufferLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_BSS_DESCRIPTION = *mut DOT11_BSS_DESCRIPTION; +STRUCT!{struct DOT11_JOIN_REQUEST { + uJoinFailureTimeout: ULONG, + OperationalRateSet: DOT11_RATE_SET, + uChCenterFrequency: ULONG, + dot11BSSDescription: DOT11_BSS_DESCRIPTION, +}} +pub type PDOT11_JOIN_REQUEST = *mut DOT11_JOIN_REQUEST; +pub const OID_DOT11_START_REQUEST: u32 = OID_DOT11_NDIS_START + 14; +STRUCT!{struct DOT11_START_REQUEST { + uStartFailureTimeout: ULONG, + OperationalRateSet: DOT11_RATE_SET, + uChCenterFrequency: ULONG, + dot11BSSDescription: DOT11_BSS_DESCRIPTION, +}} +pub type PDOT11_START_REQUEST = *mut DOT11_START_REQUEST; +pub const OID_DOT11_UPDATE_IE: u32 = OID_DOT11_NDIS_START + 15; +ENUM!{enum DOT11_UPDATE_IE_OP { + dot11_update_ie_op_create_replace = 1, + dot11_update_ie_op_delete = 2, +}} +pub type PDOT11_UPDATE_IE_OP = *mut DOT11_UPDATE_IE_OP; +STRUCT!{struct DOT11_UPDATE_IE { + dot11UpdateIEOp: DOT11_UPDATE_IE_OP, + 
uBufferLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_UPDATE_IE = *mut DOT11_UPDATE_IE; +pub const OID_DOT11_RESET_REQUEST: u32 = OID_DOT11_NDIS_START + 16; +ENUM!{enum DOT11_RESET_TYPE { + dot11_reset_type_phy = 1, + dot11_reset_type_mac = 2, + dot11_reset_type_phy_and_mac = 3, +}} +pub type PDOT11_RESET_TYPE = *mut DOT11_RESET_TYPE; +STRUCT!{struct DOT11_RESET_REQUEST { + dot11ResetType: DOT11_RESET_TYPE, + dot11MacAddress: DOT11_MAC_ADDRESS, + bSetDefaultMIB: BOOLEAN, +}} +pub type PDOT11_RESET_REQUEST = *mut DOT11_RESET_REQUEST; +pub const OID_DOT11_NIC_POWER_STATE: u32 = OID_DOT11_NDIS_START + 17; +pub const OID_DOT11_OPTIONAL_CAPABILITY: u32 = OID_DOT11_NDIS_START + 18; +STRUCT!{struct DOT11_OPTIONAL_CAPABILITY { + uReserved: ULONG, + bDot11PCF: BOOLEAN, + bDot11PCFMPDUTransferToPC: BOOLEAN, + bStrictlyOrderedServiceClass: BOOLEAN, +}} +pub type PDOT11_OPTIONAL_CAPABILITY = *mut DOT11_OPTIONAL_CAPABILITY; +pub const OID_DOT11_CURRENT_OPTIONAL_CAPABILITY: u32 = OID_DOT11_NDIS_START + 19; +STRUCT!{struct DOT11_CURRENT_OPTIONAL_CAPABILITY { + uReserved: ULONG, + bDot11CFPollable: BOOLEAN, + bDot11PCF: BOOLEAN, + bDot11PCFMPDUTransferToPC: BOOLEAN, + bStrictlyOrderedServiceClass: BOOLEAN, +}} +pub type PDOT11_CURRENT_OPTIONAL_CAPABILITY = *mut DOT11_CURRENT_OPTIONAL_CAPABILITY; +pub const OID_DOT11_STATION_ID: u32 = OID_DOT11_NDIS_START + 20; +pub const OID_DOT11_MEDIUM_OCCUPANCY_LIMIT: u32 = OID_DOT11_NDIS_START + 21; +pub const OID_DOT11_CF_POLLABLE: u32 = OID_DOT11_NDIS_START + 22; +pub const OID_DOT11_CFP_PERIOD: u32 = OID_DOT11_NDIS_START + 23; +pub const OID_DOT11_CFP_MAX_DURATION: u32 = OID_DOT11_NDIS_START + 24; +pub const OID_DOT11_POWER_MGMT_MODE: u32 = OID_DOT11_NDIS_START + 25; +ENUM!{enum DOT11_POWER_MODE { + dot11_power_mode_unknown = 0, + dot11_power_mode_active = 1, + dot11_power_mode_powersave = 2, +}} +pub type PDOT11_POWER_MODE = *mut DOT11_POWER_MODE; +pub const DOT11_POWER_SAVE_LEVEL_MAX_PSP: ULONG = 1; +pub const DOT11_POWER_SAVE_LEVEL_FAST_PSP: ULONG = 2; +STRUCT!{struct DOT11_POWER_MGMT_MODE { + dot11PowerMode: DOT11_POWER_MODE, + uPowerSaveLevel: ULONG, + usListenInterval: USHORT, + usAID: USHORT, + bReceiveDTIMs: BOOLEAN, +}} +pub type PDOT11_POWER_MGMT_MODE = *mut DOT11_POWER_MGMT_MODE; +pub const OID_DOT11_OPERATIONAL_RATE_SET: u32 = OID_DOT11_NDIS_START + 26; +pub const OID_DOT11_BEACON_PERIOD: u32 = OID_DOT11_NDIS_START + 27; +pub const OID_DOT11_DTIM_PERIOD: u32 = OID_DOT11_NDIS_START + 28; +pub const OID_DOT11_WEP_ICV_ERROR_COUNT: u32 = OID_DOT11_NDIS_START + 29; +pub const OID_DOT11_MAC_ADDRESS: u32 = OID_DOT11_NDIS_START + 30; +pub const OID_DOT11_RTS_THRESHOLD: u32 = OID_DOT11_NDIS_START + 31; +pub const OID_DOT11_SHORT_RETRY_LIMIT: u32 = OID_DOT11_NDIS_START + 32; +pub const OID_DOT11_LONG_RETRY_LIMIT: u32 = OID_DOT11_NDIS_START + 33; +pub const OID_DOT11_FRAGMENTATION_THRESHOLD: u32 = OID_DOT11_NDIS_START + 34; +pub const OID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME: u32 = OID_DOT11_NDIS_START + 35; +pub const OID_DOT11_MAX_RECEIVE_LIFETIME: u32 = OID_DOT11_NDIS_START + 36; +pub const OID_DOT11_COUNTERS_ENTRY: u32 = OID_DOT11_NDIS_START + 37; +STRUCT!{struct DOT11_COUNTERS_ENTRY { + uTransmittedFragmentCount: ULONG, + uMulticastTransmittedFrameCount: ULONG, + uFailedCount: ULONG, + uRetryCount: ULONG, + uMultipleRetryCount: ULONG, + uFrameDuplicateCount: ULONG, + uRTSSuccessCount: ULONG, + uRTSFailureCount: ULONG, + uACKFailureCount: ULONG, + uReceivedFragmentCount: ULONG, + uMulticastReceivedFrameCount: ULONG, + uFCSErrorCount: ULONG, + 
uTransmittedFrameCount: ULONG, +}} +pub type PDOT11_COUNTERS_ENTRY = *mut DOT11_COUNTERS_ENTRY; +pub const OID_DOT11_SUPPORTED_PHY_TYPES: u32 = OID_DOT11_NDIS_START + 38; +STRUCT!{struct DOT11_SUPPORTED_PHY_TYPES { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11PHYType: [DOT11_PHY_TYPE; 1], +}} +pub type PDOT11_SUPPORTED_PHY_TYPES = *mut DOT11_SUPPORTED_PHY_TYPES; +pub const OID_DOT11_CURRENT_REG_DOMAIN: u32 = OID_DOT11_NDIS_START + 39; +pub const DOT11_REG_DOMAIN_OTHER: ULONG = 0x00000000; +pub const DOT11_REG_DOMAIN_FCC: ULONG = 0x00000010; +pub const DOT11_REG_DOMAIN_DOC: ULONG = 0x00000020; +pub const DOT11_REG_DOMAIN_ETSI: ULONG = 0x00000030; +pub const DOT11_REG_DOMAIN_SPAIN: ULONG = 0x00000031; +pub const DOT11_REG_DOMAIN_FRANCE: ULONG = 0x00000032; +pub const DOT11_REG_DOMAIN_MKK: ULONG = 0x00000040; +pub const OID_DOT11_TEMP_TYPE: u32 = OID_DOT11_NDIS_START + 40; +ENUM!{enum DOT11_TEMP_TYPE { + dot11_temp_type_unknown = 0, + dot11_temp_type_1 = 1, + dot11_temp_type_2 = 2, +}} +pub type PDOT11_TEMP_TYPE = *mut DOT11_TEMP_TYPE; +pub const OID_DOT11_CURRENT_TX_ANTENNA: u32 = OID_DOT11_NDIS_START + 41; +pub const OID_DOT11_DIVERSITY_SUPPORT: u32 = OID_DOT11_NDIS_START + 42; +ENUM!{enum DOT11_DIVERSITY_SUPPORT { + dot11_diversity_support_unknown = 0, + dot11_diversity_support_fixedlist = 1, + dot11_diversity_support_notsupported = 2, + dot11_diversity_support_dynamic = 3, +}} +pub type PDOT11_DIVERSITY_SUPPORT = *mut DOT11_DIVERSITY_SUPPORT; +pub const OID_DOT11_CURRENT_RX_ANTENNA: u32 = OID_DOT11_NDIS_START + 43; +pub const OID_DOT11_SUPPORTED_POWER_LEVELS: u32 = OID_DOT11_NDIS_START + 44; +STRUCT!{struct DOT11_SUPPORTED_POWER_LEVELS { + uNumOfSupportedPowerLevels: ULONG, + uTxPowerLevelValues: [ULONG; 8], +}} +pub type PDOT11_SUPPORTED_POWER_LEVELS = *mut DOT11_SUPPORTED_POWER_LEVELS; +pub const OID_DOT11_CURRENT_TX_POWER_LEVEL: u32 = OID_DOT11_NDIS_START + 45; +pub const OID_DOT11_HOP_TIME: u32 = OID_DOT11_NDIS_START + 46; +pub const OID_DOT11_CURRENT_CHANNEL_NUMBER: u32 = OID_DOT11_NDIS_START + 47; +pub const OID_DOT11_MAX_DWELL_TIME: u32 = OID_DOT11_NDIS_START + 48; +pub const OID_DOT11_CURRENT_DWELL_TIME: u32 = OID_DOT11_NDIS_START + 49; +pub const OID_DOT11_CURRENT_SET: u32 = OID_DOT11_NDIS_START + 50; +pub const OID_DOT11_CURRENT_PATTERN: u32 = OID_DOT11_NDIS_START + 51; +pub const OID_DOT11_CURRENT_INDEX: u32 = OID_DOT11_NDIS_START + 52; +pub const OID_DOT11_CURRENT_CHANNEL: u32 = OID_DOT11_NDIS_START + 53; +pub const OID_DOT11_CCA_MODE_SUPPORTED: u32 = OID_DOT11_NDIS_START + 54; +pub const DOT11_CCA_MODE_ED_ONLY: ULONG = 0x00000001; +pub const DOT11_CCA_MODE_CS_ONLY: ULONG = 0x00000002; +pub const DOT11_CCA_MODE_ED_and_CS: ULONG = 0x00000004; +pub const DOT11_CCA_MODE_CS_WITH_TIMER: ULONG = 0x00000008; +pub const DOT11_CCA_MODE_HRCS_AND_ED: ULONG = 0x00000010; +pub const OID_DOT11_CURRENT_CCA_MODE: u32 = OID_DOT11_NDIS_START + 55; +pub const OID_DOT11_ED_THRESHOLD: u32 = OID_DOT11_NDIS_START + 56; +pub const OID_DOT11_CCA_WATCHDOG_TIMER_MAX: u32 = OID_DOT11_NDIS_START + 57; +pub const OID_DOT11_CCA_WATCHDOG_COUNT_MAX: u32 = OID_DOT11_NDIS_START + 58; +pub const OID_DOT11_CCA_WATCHDOG_TIMER_MIN: u32 = OID_DOT11_NDIS_START + 59; +pub const OID_DOT11_CCA_WATCHDOG_COUNT_MIN: u32 = OID_DOT11_NDIS_START + 60; +pub const OID_DOT11_REG_DOMAINS_SUPPORT_VALUE: u32 = OID_DOT11_NDIS_START + 61; +STRUCT!{struct DOT11_REG_DOMAIN_VALUE { + uRegDomainsSupportIndex: ULONG, + uRegDomainsSupportValue: ULONG, +}} +pub type PDOT11_REG_DOMAIN_VALUE = *mut DOT11_REG_DOMAIN_VALUE; 
+STRUCT!{struct DOT11_REG_DOMAINS_SUPPORT_VALUE { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11RegDomainValue: [DOT11_REG_DOMAIN_VALUE; 1], +}} +pub type PDOT11_REG_DOMAINS_SUPPORT_VALUE = *mut DOT11_REG_DOMAINS_SUPPORT_VALUE; +pub const OID_DOT11_SUPPORTED_TX_ANTENNA: u32 = OID_DOT11_NDIS_START + 62; +STRUCT!{struct DOT11_SUPPORTED_ANTENNA { + uAntennaListIndex: ULONG, + bSupportedAntenna: BOOLEAN, +}} +pub type PDOT11_SUPPORTED_ANTENNA = *mut DOT11_SUPPORTED_ANTENNA; +STRUCT!{struct DOT11_SUPPORTED_ANTENNA_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11SupportedAntenna: [DOT11_SUPPORTED_ANTENNA; 1], +}} +pub type PDOT11_SUPPORTED_ANTENNA_LIST = *mut DOT11_SUPPORTED_ANTENNA_LIST; +pub const OID_DOT11_SUPPORTED_RX_ANTENNA: u32 = OID_DOT11_NDIS_START + 63; +pub const OID_DOT11_DIVERSITY_SELECTION_RX: u32 = OID_DOT11_NDIS_START + 64; +STRUCT!{struct DOT11_DIVERSITY_SELECTION_RX { + uAntennaListIndex: ULONG, + bDiversitySelectionRX: BOOLEAN, +}} +pub type PDOT11_DIVERSITY_SELECTION_RX = *mut DOT11_DIVERSITY_SELECTION_RX; +STRUCT!{struct DOT11_DIVERSITY_SELECTION_RX_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11DiversitySelectionRx: [DOT11_DIVERSITY_SELECTION_RX; 1], +}} +pub type PDOT11_DIVERSITY_SELECTION_RX_LIST = *mut DOT11_DIVERSITY_SELECTION_RX_LIST; +pub const OID_DOT11_SUPPORTED_DATA_RATES_VALUE: u32 = OID_DOT11_NDIS_START + 65; +pub const MAX_NUM_SUPPORTED_RATES: usize = 8; +pub const MAX_NUM_SUPPORTED_RATES_V2: usize = 255; +STRUCT!{struct DOT11_SUPPORTED_DATA_RATES_VALUE { + ucSupportedTxDataRatesValue: [UCHAR; MAX_NUM_SUPPORTED_RATES], + ucSupportedRxDataRatesValue: [UCHAR; MAX_NUM_SUPPORTED_RATES], +}} +pub type PDOT11_SUPPORTED_DATA_RATES_VALUE = *mut DOT11_SUPPORTED_DATA_RATES_VALUE; +STRUCT!{struct DOT11_SUPPORTED_DATA_RATES_VALUE_V2 { + ucSupportedTxDataRatesValue: [UCHAR; MAX_NUM_SUPPORTED_RATES_V2], + ucSupportedRxDataRatesValue: [UCHAR; MAX_NUM_SUPPORTED_RATES_V2], +}} +pub type PDOT11_SUPPORTED_DATA_RATES_VALUE_V2 = *mut DOT11_SUPPORTED_DATA_RATES_VALUE_V2; +pub type DOT11_SUPPORTED_DATA_RATES_VALUE_V1 = DOT11_SUPPORTED_DATA_RATES_VALUE_V2; +pub type PDOT11_SUPPORTED_DATA_RATES_VALUE_V1 = *mut DOT11_SUPPORTED_DATA_RATES_VALUE_V2; +pub const OID_DOT11_CURRENT_FREQUENCY: u32 = OID_DOT11_NDIS_START + 66; +pub const OID_DOT11_TI_THRESHOLD: u32 = OID_DOT11_NDIS_START + 67; +pub const OID_DOT11_FREQUENCY_BANDS_SUPPORTED: u32 = OID_DOT11_NDIS_START + 68; +pub const DOT11_FREQUENCY_BANDS_LOWER: u32 = 0x00000001; +pub const DOT11_FREQUENCY_BANDS_MIDDLE: u32 = 0x00000002; +pub const DOT11_FREQUENCY_BANDS_UPPER: u32 = 0x00000004; +pub const OID_DOT11_SHORT_PREAMBLE_OPTION_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 69; +pub const OID_DOT11_PBCC_OPTION_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 70; +pub const OID_DOT11_CHANNEL_AGILITY_PRESENT: u32 = OID_DOT11_NDIS_START + 71; +pub const OID_DOT11_CHANNEL_AGILITY_ENABLED: u32 = OID_DOT11_NDIS_START + 72; +pub const OID_DOT11_HR_CCA_MODE_SUPPORTED: u32 = OID_DOT11_NDIS_START + 73; +pub const DOT11_HR_CCA_MODE_ED_ONLY: ULONG = 0x00000001; +pub const DOT11_HR_CCA_MODE_CS_ONLY: ULONG = 0x00000002; +pub const DOT11_HR_CCA_MODE_CS_AND_ED: ULONG = 0x00000004; +pub const DOT11_HR_CCA_MODE_CS_WITH_TIMER: ULONG = 0x00000008; +pub const DOT11_HR_CCA_MODE_HRCS_AND_ED: ULONG = 0x00000010; +pub const OID_DOT11_MULTI_DOMAIN_CAPABILITY_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 74; +pub const OID_DOT11_MULTI_DOMAIN_CAPABILITY_ENABLED: u32 = OID_DOT11_NDIS_START + 75; +pub const 
OID_DOT11_COUNTRY_STRING: u32 = OID_DOT11_NDIS_START + 76; +STRUCT!{struct DOT11_MULTI_DOMAIN_CAPABILITY_ENTRY { + uMultiDomainCapabilityIndex: ULONG, + uFirstChannelNumber: ULONG, + uNumberOfChannels: ULONG, + lMaximumTransmitPowerLevel: LONG, +}} +pub type PDOT11_MULTI_DOMAIN_CAPABILITY_ENTRY = *mut DOT11_MULTI_DOMAIN_CAPABILITY_ENTRY; +STRUCT!{struct DOT11_MD_CAPABILITY_ENTRY_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11MDCapabilityEntry: [DOT11_MULTI_DOMAIN_CAPABILITY_ENTRY; 1], +}} +pub type PDOT11_MD_CAPABILITY_ENTRY_LIST = *mut DOT11_MD_CAPABILITY_ENTRY_LIST; +pub const OID_DOT11_MULTI_DOMAIN_CAPABILITY: u32 = OID_DOT11_NDIS_START + 77; +pub const OID_DOT11_EHCC_PRIME_RADIX: u32 = OID_DOT11_NDIS_START + 78; +pub const OID_DOT11_EHCC_NUMBER_OF_CHANNELS_FAMILY_INDEX: u32 = OID_DOT11_NDIS_START + 79; +pub const OID_DOT11_EHCC_CAPABILITY_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 80; +pub const OID_DOT11_EHCC_CAPABILITY_ENABLED: u32 = OID_DOT11_NDIS_START + 81; +pub const OID_DOT11_HOP_ALGORITHM_ADOPTED: u32 = OID_DOT11_NDIS_START + 82; +ENUM!{enum DOT11_HOP_ALGO_ADOPTED { + dot11_hop_algo_current = 0, + dot11_hop_algo_hop_index = 1, + dot11_hop_algo_hcc = 2, +}} +pub type PDOT11_HOP_ALGO_ADOPTED = *mut DOT11_HOP_ALGO_ADOPTED; +pub const OID_DOT11_RANDOM_TABLE_FLAG: u32 = OID_DOT11_NDIS_START + 83; +pub const OID_DOT11_NUMBER_OF_HOPPING_SETS: u32 = OID_DOT11_NDIS_START + 84; +pub const OID_DOT11_HOP_MODULUS: u32 = OID_DOT11_NDIS_START + 85; +pub const OID_DOT11_HOP_OFFSET: u32 = OID_DOT11_NDIS_START + 86; +pub const OID_DOT11_HOPPING_PATTERN: u32 = OID_DOT11_NDIS_START + 87; +STRUCT!{struct DOT11_HOPPING_PATTERN_ENTRY { + uHoppingPatternIndex: ULONG, + uRandomTableFieldNumber: ULONG, +}} +pub type PDOT11_HOPPING_PATTERN_ENTRY = *mut DOT11_HOPPING_PATTERN_ENTRY; +STRUCT!{struct DOT11_HOPPING_PATTERN_ENTRY_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11HoppingPatternEntry: [DOT11_HOPPING_PATTERN_ENTRY; 1], +}} +pub type PDOT11_HOPPING_PATTERN_ENTRY_LIST = *mut DOT11_HOPPING_PATTERN_ENTRY_LIST; +pub const OID_DOT11_RANDOM_TABLE_FIELD_NUMBER: u32 = OID_DOT11_NDIS_START + 88; +pub const OID_DOT11_WPA_TSC: u32 = OID_DOT11_NDIS_START + 89; +STRUCT!{struct DOT11_WPA_TSC { + uReserved: ULONG, + dot11OffloadType: DOT11_OFFLOAD_TYPE, + hOffload: HANDLE, + dot11IV48Counter: DOT11_IV48_COUNTER, +}} +pub type PDOT11_WPA_TSC = *mut DOT11_WPA_TSC; +pub const OID_DOT11_RSSI_RANGE: u32 = OID_DOT11_NDIS_START + 90; +STRUCT!{struct DOT11_RSSI_RANGE { + dot11PhyType: DOT11_PHY_TYPE, + uRSSIMin: ULONG, + uRSSIMax: ULONG, +}} +pub type PDOT11_RSSI_RANGE = *mut DOT11_RSSI_RANGE; +pub const OID_DOT11_RF_USAGE: u32 = OID_DOT11_NDIS_START + 91; +pub const OID_DOT11_NIC_SPECIFIC_EXTENSION: u32 = OID_DOT11_NDIS_START + 92; +STRUCT!{struct DOT11_NIC_SPECIFIC_EXTENSION { + uBufferLength: ULONG, + uTotalBufferLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_NIC_SPECIFIC_EXTENSION = *mut DOT11_NIC_SPECIFIC_EXTENSION; +pub const OID_DOT11_AP_JOIN_REQUEST: u32 = OID_DOT11_NDIS_START + 93; +STRUCT!{struct DOT11_AP_JOIN_REQUEST { + uJoinFailureTimeout: ULONG, + OperationalRateSet: DOT11_RATE_SET, + uChCenterFrequency: ULONG, + dot11BSSDescription: DOT11_BSS_DESCRIPTION, +}} +pub type PDOT11_AP_JOIN_REQUEST = *mut DOT11_AP_JOIN_REQUEST; +pub const OID_DOT11_ERP_PBCC_OPTION_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 94; +pub const OID_DOT11_ERP_PBCC_OPTION_ENABLED: u32 = OID_DOT11_NDIS_START + 95; +pub const OID_DOT11_DSSS_OFDM_OPTION_IMPLEMENTED: u32 = OID_DOT11_NDIS_START 
+ 96; +pub const OID_DOT11_DSSS_OFDM_OPTION_ENABLED: u32 = OID_DOT11_NDIS_START + 97; +pub const OID_DOT11_SHORT_SLOT_TIME_OPTION_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 98; +pub const OID_DOT11_SHORT_SLOT_TIME_OPTION_ENABLED: u32 = OID_DOT11_NDIS_START + 99; +pub const OID_DOT11_MAX_MAC_ADDRESS_STATES: u32 = OID_DOT11_NDIS_START + 100; +pub const OID_DOT11_RECV_SENSITIVITY_LIST: u32 = OID_DOT11_NDIS_START + 101; +STRUCT!{struct DOT11_RECV_SENSITIVITY { + ucDataRate: UCHAR, + lRSSIMin: LONG, + lRSSIMax: LONG, +}} +pub type PDOT11_RECV_SENSITIVITY = *mut DOT11_RECV_SENSITIVITY; +UNION!{union DOT11_RECV_SENSITIVITY_LIST_u { + [u32; 1], + dot11PhyType dot11PhyType_mut: DOT11_PHY_TYPE, + uPhyId uPhyId_mut: ULONG, +}} +STRUCT!{struct DOT11_RECV_SENSITIVITY_LIST { + u: DOT11_RECV_SENSITIVITY_LIST_u, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11RecvSensitivity: [DOT11_RECV_SENSITIVITY; 1], +}} +pub type PDOT11_RECV_SENSITIVITY_LIST = *mut DOT11_RECV_SENSITIVITY_LIST; +pub const OID_DOT11_WME_IMPLEMENTED: u32 = OID_DOT11_NDIS_START + 102; +pub const OID_DOT11_WME_ENABLED: u32 = OID_DOT11_NDIS_START + 103; +pub const OID_DOT11_WME_AC_PARAMETERS: u32 = OID_DOT11_NDIS_START + 104; +ENUM!{enum DOT11_AC_PARAM { + dot11_AC_param_BE = 0, + dot11_AC_param_BK = 1, + dot11_AC_param_VI = 2, + dot11_AC_param_VO = 3, + dot11_AC_param_max = 4, +}} +pub type PDOT11_AC_PARAM = *mut DOT11_AC_PARAM; +STRUCT!{struct DOT11_WME_AC_PARAMETERS { + ucAccessCategoryIndex: UCHAR, + ucAIFSN: UCHAR, + ucECWmin: UCHAR, + ucECWmax: UCHAR, + usTXOPLimit: USHORT, +}} +pub type PDOT11_WME_AC_PARAMETERS = *mut DOT11_WME_AC_PARAMETERS; +STRUCT!{struct DOT11_WME_AC_PARAMETERS_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11WMEACParameters: [DOT11_WME_AC_PARAMETERS; 1], +}} +pub type PDOT11_WME_AC_PARAMETERS_LIST = *mut DOT11_WME_AC_PARAMETERS_LIST; +pub const OID_DOT11_WME_UPDATE_IE: u32 = OID_DOT11_NDIS_START + 105; +STRUCT!{struct DOT11_WME_UPDATE_IE { + uParamElemMinBeaconIntervals: ULONG, + uWMEInfoElemOffset: ULONG, + uWMEInfoElemLength: ULONG, + uWMEParamElemOffset: ULONG, + uWMEParamElemLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_WME_UPDATE_IE = *mut DOT11_WME_UPDATE_IE; +pub const OID_DOT11_QOS_TX_QUEUES_SUPPORTED: u32 = OID_DOT11_NDIS_START + 106; +pub const OID_DOT11_QOS_TX_DURATION: u32 = OID_DOT11_NDIS_START + 107; +STRUCT!{struct DOT11_QOS_TX_DURATION { + uNominalMSDUSize: ULONG, + uMinPHYRate: ULONG, + uDuration: ULONG, +}} +pub type PDOT11_QOS_TX_DURATION = *mut DOT11_QOS_TX_DURATION; +pub const OID_DOT11_QOS_TX_MEDIUM_TIME: u32 = OID_DOT11_NDIS_START + 108; +STRUCT!{struct DOT11_QOS_TX_MEDIUM_TIME { + dot11PeerAddress: DOT11_MAC_ADDRESS, + ucQoSPriority: UCHAR, + uMediumTimeAdmited: ULONG, +}} +pub type PDOT11_QOS_TX_MEDIUM_TIME = *mut DOT11_QOS_TX_MEDIUM_TIME; +pub const OID_DOT11_SUPPORTED_OFDM_FREQUENCY_LIST: u32 = OID_DOT11_NDIS_START + 109; +STRUCT!{struct DOT11_SUPPORTED_OFDM_FREQUENCY { + uCenterFrequency: ULONG, +}} +pub type PDOT11_SUPPORTED_OFDM_FREQUENCY = *mut DOT11_SUPPORTED_OFDM_FREQUENCY; +STRUCT!{struct DOT11_SUPPORTED_OFDM_FREQUENCY_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11SupportedOFDMFrequency: [DOT11_SUPPORTED_OFDM_FREQUENCY; 1], +}} +pub type PDOT11_SUPPORTED_OFDM_FREQUENCY_LIST = *mut DOT11_SUPPORTED_OFDM_FREQUENCY_LIST; +pub const OID_DOT11_SUPPORTED_DSSS_CHANNEL_LIST: u32 = OID_DOT11_NDIS_START + 110; +STRUCT!{struct DOT11_SUPPORTED_DSSS_CHANNEL { + uChannel: ULONG, +}} +pub type PDOT11_SUPPORTED_DSSS_CHANNEL = *mut 
DOT11_SUPPORTED_DSSS_CHANNEL; +STRUCT!{struct DOT11_SUPPORTED_DSSS_CHANNEL_LIST { + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11SupportedDSSSChannel: [DOT11_SUPPORTED_DSSS_CHANNEL; 1], +}} +pub type PDOT11_SUPPORTED_DSSS_CHANNEL_LIST = *mut DOT11_SUPPORTED_DSSS_CHANNEL_LIST; +STRUCT!{struct DOT11_BYTE_ARRAY { + Header: NDIS_OBJECT_HEADER, + uNumOfBytes: ULONG, + uTotalNumOfBytes: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_BYTE_ARRAY = *mut DOT11_BYTE_ARRAY; +pub const OID_DOT11_AUTO_CONFIG_ENABLED: u32 = + NWF_DEFINE_OID!(120, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const DOT11_PHY_AUTO_CONFIG_ENABLED_FLAG: ULONG = 0x00000001; +pub const DOT11_MAC_AUTO_CONFIG_ENABLED_FLAG: ULONG = 0x00000002; +pub const OID_DOT11_ENUM_BSS_LIST: u32 = + NWF_DEFINE_OID!(121, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const DOT11_BSS_ENTRY_BYTE_ARRAY_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_BSS_ENTRY_PHY_SPECIFIC_INFO_FHSS { + uHopPattern: ULONG, + uHopSet: ULONG, + uDwellTime: ULONG, +}} +UNION!{union DOT11_BSS_ENTRY_PHY_SPECIFIC_INFO { + [u32; 3], + uChCenterFrequency uChCenterFrequency_mut: ULONG, + FHSS FHSS_mut: DOT11_BSS_ENTRY_PHY_SPECIFIC_INFO_FHSS, +}} +pub type PDOT11_BSS_ENTRY_PHY_SPECIFIC_INFO = *mut DOT11_BSS_ENTRY_PHY_SPECIFIC_INFO; +STRUCT!{struct DOT11_BSS_ENTRY { + uPhyId: ULONG, + PhySpecificInfo: DOT11_BSS_ENTRY_PHY_SPECIFIC_INFO, + dot11BSSID: DOT11_MAC_ADDRESS, + dot11BSSType: DOT11_BSS_TYPE, + lRSSI: LONG, + uLinkQuality: ULONG, + bInRegDomain: BOOLEAN, + usBeaconPeriod: USHORT, + ullTimestamp: ULONGLONG, + ullHostTimestamp: ULONGLONG, + usCapabilityInformation: USHORT, + uBufferLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_BSS_ENTRY = *mut DOT11_BSS_ENTRY; +pub const OID_DOT11_FLUSH_BSS_LIST: u32 = + NWF_DEFINE_OID!(122, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_POWER_MGMT_REQUEST: u32 = + NWF_DEFINE_OID!(123, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const DOT11_POWER_SAVING_NO_POWER_SAVING: ULONG = 0; +pub const DOT11_POWER_SAVING_FAST_PSP: ULONG = 8; +pub const DOT11_POWER_SAVING_MAX_PSP: ULONG = 16; +pub const DOT11_POWER_SAVING_MAXIMUM_LEVEL: ULONG = 24; +pub const OID_DOT11_DESIRED_SSID_LIST: u32 = + NWF_DEFINE_OID!(124, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_SSID_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + SSIDs: [DOT11_SSID; 1], +}} +pub type PDOT11_SSID_LIST = *mut DOT11_SSID_LIST; +pub const DOT11_SSID_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_EXCLUDED_MAC_ADDRESS_LIST: u32 = + NWF_DEFINE_OID!(125, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_MAC_ADDRESS_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + MacAddrs: [DOT11_MAC_ADDRESS; 1], +}} +pub type PDOT11_MAC_ADDRESS_LIST = *mut DOT11_MAC_ADDRESS_LIST; +pub const DOT11_MAC_ADDRESS_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_DESIRED_BSSID_LIST: u32 = + NWF_DEFINE_OID!(126, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_DESIRED_BSS_TYPE: u32 = + NWF_DEFINE_OID!(127, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_PMKID_LIST: u32 = NWF_DEFINE_OID!(128, NWF_OPERATIONAL_OID, NWF_OPTIONAL_OID); +pub type DOT11_PMKID_VALUE = [UCHAR; 16]; +STRUCT!{struct DOT11_PMKID_ENTRY { + BSSID: DOT11_MAC_ADDRESS, + PMKID: DOT11_PMKID_VALUE, + uFlags: ULONG, +}} +pub type PDOT11_PMKID_ENTRY = *mut DOT11_PMKID_ENTRY; +STRUCT!{struct DOT11_PMKID_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, 
+ uTotalNumOfEntries: ULONG, + PMKIDs: [DOT11_PMKID_ENTRY; 1], +}} +pub type PDOT11_PMKID_LIST = *mut DOT11_PMKID_LIST; +pub const DOT11_PMKID_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_CONNECT_REQUEST: u32 = + NWF_DEFINE_OID!(129, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_EXCLUDE_UNENCRYPTED: u32 = + NWF_DEFINE_OID!(130, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_STATISTICS: u32 = + NWF_DEFINE_OID!(131, NWF_STATISTICS_OID, NWF_MANDATORY_OID); +pub const DOT11_STATISTICS_UNKNOWN: ULONGLONG = -1i64 as u64; +STRUCT!{struct DOT11_PHY_FRAME_STATISTICS { + ullTransmittedFrameCount: ULONGLONG, + ullMulticastTransmittedFrameCount: ULONGLONG, + ullFailedCount: ULONGLONG, + ullRetryCount: ULONGLONG, + ullMultipleRetryCount: ULONGLONG, + ullMaxTXLifetimeExceededCount: ULONGLONG, + ullTransmittedFragmentCount: ULONGLONG, + ullRTSSuccessCount: ULONGLONG, + ullRTSFailureCount: ULONGLONG, + ullACKFailureCount: ULONGLONG, + ullReceivedFrameCount: ULONGLONG, + ullMulticastReceivedFrameCount: ULONGLONG, + ullPromiscuousReceivedFrameCount: ULONGLONG, + ullMaxRXLifetimeExceededCount: ULONGLONG, + ullFrameDuplicateCount: ULONGLONG, + ullReceivedFragmentCount: ULONGLONG, + ullPromiscuousReceivedFragmentCount: ULONGLONG, + ullFCSErrorCount: ULONGLONG, +}} +pub type PDOT11_PHY_FRAME_STATISTICS = *mut DOT11_PHY_FRAME_STATISTICS; +STRUCT!{struct DOT11_MAC_FRAME_STATISTICS { + ullTransmittedFrameCount: ULONGLONG, + ullReceivedFrameCount: ULONGLONG, + ullTransmittedFailureFrameCount: ULONGLONG, + ullReceivedFailureFrameCount: ULONGLONG, + ullWEPExcludedCount: ULONGLONG, + ullTKIPLocalMICFailures: ULONGLONG, + ullTKIPReplays: ULONGLONG, + ullTKIPICVErrorCount: ULONGLONG, + ullCCMPReplays: ULONGLONG, + ullCCMPDecryptErrors: ULONGLONG, + ullWEPUndecryptableCount: ULONGLONG, + ullWEPICVErrorCount: ULONGLONG, + ullDecryptSuccessCount: ULONGLONG, + ullDecryptFailureCount: ULONGLONG, +}} +pub type PDOT11_MAC_FRAME_STATISTICS = *mut DOT11_MAC_FRAME_STATISTICS; +STRUCT!{struct DOT11_STATISTICS { + Header: NDIS_OBJECT_HEADER, + ullFourWayHandshakeFailures: ULONGLONG, + ullTKIPCounterMeasuresInvoked: ULONGLONG, + ullReserved: ULONGLONG, + MacUcastCounters: DOT11_MAC_FRAME_STATISTICS, + MacMcastCounters: DOT11_MAC_FRAME_STATISTICS, + PhyCounters: [DOT11_PHY_FRAME_STATISTICS; 1], +}} +pub type PDOT11_STATISTICS = *mut DOT11_STATISTICS; +pub const DOT11_STATISTICS_REVISION_1: UCHAR = 1; +pub const OID_DOT11_PRIVACY_EXEMPTION_LIST: u32 = + NWF_DEFINE_OID!(132, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_PRIVACY_EXEMPTION { + usEtherType: USHORT, + usExemptionActionType: USHORT, + usExemptionPacketType: USHORT, +}} +pub type PDOT11_PRIVACY_EXEMPTION = *mut DOT11_PRIVACY_EXEMPTION; +pub const DOT11_EXEMPT_NO_EXEMPTION: USHORT = 0; +pub const DOT11_EXEMPT_ALWAYS: USHORT = 1; +pub const DOT11_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: USHORT = 2; +pub const DOT11_EXEMPT_UNICAST: USHORT = 1; +pub const DOT11_EXEMPT_MULTICAST: USHORT = 2; +pub const DOT11_EXEMPT_BOTH: USHORT = 3; +STRUCT!{struct DOT11_PRIVACY_EXEMPTION_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + PrivacyExemptionEntries: [DOT11_PRIVACY_EXEMPTION; 1], +}} +pub type PDOT11_PRIVACY_EXEMPTION_LIST = *mut DOT11_PRIVACY_EXEMPTION_LIST; +pub const DOT11_PRIVACY_EXEMPTION_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_ENABLED_AUTHENTICATION_ALGORITHM: u32 = + NWF_DEFINE_OID!(133, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_AUTH_ALGORITHM_LIST { + 
Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + AlgorithmIds: [DOT11_AUTH_ALGORITHM; 1], +}} +pub type PDOT11_AUTH_ALGORITHM_LIST = *mut DOT11_AUTH_ALGORITHM_LIST; +pub const DOT11_AUTH_ALGORITHM_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_SUPPORTED_UNICAST_ALGORITHM_PAIR: u32 = + NWF_DEFINE_OID!(134, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_AUTH_CIPHER_PAIR_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + AuthCipherPairs: [DOT11_AUTH_CIPHER_PAIR; 1], +}} +pub type PDOT11_AUTH_CIPHER_PAIR_LIST = *mut DOT11_AUTH_CIPHER_PAIR_LIST; +pub const DOT11_AUTH_CIPHER_PAIR_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_ENABLED_UNICAST_CIPHER_ALGORITHM: u32 = + NWF_DEFINE_OID!(135, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_CIPHER_ALGORITHM_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + AlgorithmIds: [DOT11_CIPHER_ALGORITHM; 1], +}} +pub type PDOT11_CIPHER_ALGORITHM_LIST = *mut DOT11_CIPHER_ALGORITHM_LIST; +pub const DOT11_CIPHER_ALGORITHM_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_SUPPORTED_MULTICAST_ALGORITHM_PAIR: u32 = + NWF_DEFINE_OID!(136, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_ENABLED_MULTICAST_CIPHER_ALGORITHM: u32 = + NWF_DEFINE_OID!(137, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_CIPHER_DEFAULT_KEY_ID: u32 = + NWF_DEFINE_OID!(138, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_CIPHER_DEFAULT_KEY: u32 = + NWF_DEFINE_OID!(139, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_CIPHER_DEFAULT_KEY_VALUE { + Header: NDIS_OBJECT_HEADER, + uKeyIndex: ULONG, + AlgorithmId: DOT11_CIPHER_ALGORITHM, + MacAddr: DOT11_MAC_ADDRESS, + bDelete: BOOLEAN, + bStatic: BOOLEAN, + usKeyLength: USHORT, + ucKey: [UCHAR; 1], +}} +pub type PDOT11_CIPHER_DEFAULT_KEY_VALUE = *mut DOT11_CIPHER_DEFAULT_KEY_VALUE; +pub const DOT11_CIPHER_DEFAULT_KEY_VALUE_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_KEY_ALGO_TKIP_MIC { + ucIV48Counter: [UCHAR; 6], + ulTKIPKeyLength: ULONG, + ulMICKeyLength: ULONG, + ucTKIPMICKeys: [UCHAR; 1], +}} +pub type PDOT11_KEY_ALGO_TKIP_MIC = *mut DOT11_KEY_ALGO_TKIP_MIC; +STRUCT!{struct DOT11_KEY_ALGO_CCMP { + ucIV48Counter: [UCHAR; 6], + ulCCMPKeyLength: ULONG, + ucCCMPKey: [UCHAR; 1], +}} +pub type PDOT11_KEY_ALGO_CCMP = *mut DOT11_KEY_ALGO_CCMP; +STRUCT!{struct DOT11_KEY_ALGO_GCMP { + ucIV48Counter: [UCHAR; 6], + ulGCMPKeyLength: ULONG, + ucGCMPKey: [UCHAR; 1], +}} +pub type PDOT11_KEY_ALGO_GCMP = *mut DOT11_KEY_ALGO_GCMP; +STRUCT!{struct DOT11_KEY_ALGO_BIP { + ucIPN: [UCHAR; 6], + ulBIPKeyLength: ULONG, + ucBIPKey: [UCHAR; 1], +}} +pub type PDOT11_KEY_ALGO_BIP = *mut DOT11_KEY_ALGO_BIP; +pub const OID_DOT11_CIPHER_KEY_MAPPING_KEY: u32 = + NWF_DEFINE_OID!(140, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +ENUM!{enum DOT11_DIRECTION { + DOT11_DIR_INBOUND = 1, + DOT11_DIR_OUTBOUND = 2, + DOT11_DIR_BOTH = 3, +}} +pub type PDOT11_DIRECTION = *mut DOT11_DIRECTION; +pub const DOT11_CIPHER_KEY_MAPPING_KEY_VALUE_BYTE_ARRAY_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_CIPHER_KEY_MAPPING_KEY_VALUE { + PeerMacAddr: DOT11_MAC_ADDRESS, + AlgorithmId: DOT11_CIPHER_ALGORITHM, + Direction: DOT11_DIRECTION, + bDelete: BOOLEAN, + bStatic: BOOLEAN, + usKeyLength: USHORT, + ucKey: [UCHAR; 1], +}} +pub type PDOT11_CIPHER_KEY_MAPPING_KEY_VALUE = *mut DOT11_CIPHER_KEY_MAPPING_KEY_VALUE; +pub const OID_DOT11_ENUM_ASSOCIATION_INFO: u32 = + NWF_DEFINE_OID!(141, NWF_OPERATIONAL_OID, 
NWF_MANDATORY_OID); +ENUM!{enum DOT11_ASSOCIATION_STATE { + dot11_assoc_state_zero = 0, + dot11_assoc_state_unauth_unassoc = 1, + dot11_assoc_state_auth_unassoc = 2, + dot11_assoc_state_auth_assoc = 3, +}} +pub type PDOT11_ASSOCIATION_STATE = *mut DOT11_ASSOCIATION_STATE; +STRUCT!{struct DOT11_ASSOCIATION_INFO_EX { + PeerMacAddress: DOT11_MAC_ADDRESS, + BSSID: DOT11_MAC_ADDRESS, + usCapabilityInformation: USHORT, + usListenInterval: USHORT, + ucPeerSupportedRates: [UCHAR; 255], + usAssociationID: USHORT, + dot11AssociationState: DOT11_ASSOCIATION_STATE, + dot11PowerMode: DOT11_POWER_MODE, + liAssociationUpTime: LARGE_INTEGER, + ullNumOfTxPacketSuccesses: ULONGLONG, + ullNumOfTxPacketFailures: ULONGLONG, + ullNumOfRxPacketSuccesses: ULONGLONG, + ullNumOfRxPacketFailures: ULONGLONG, +}} +pub type PDOT11_ASSOCIATION_INFO_EX = *mut DOT11_ASSOCIATION_INFO_EX; +STRUCT!{struct DOT11_ASSOCIATION_INFO_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11AssocInfo: [DOT11_ASSOCIATION_INFO_EX; 1], +}} +pub type PDOT11_ASSOCIATION_INFO_LIST = *mut DOT11_ASSOCIATION_INFO_LIST; +pub const DOT11_ASSOCIATION_INFO_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_DISCONNECT_REQUEST: u32 = + NWF_DEFINE_OID!(142, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_UNICAST_USE_GROUP_ENABLED: u32 = + NWF_DEFINE_OID!(143, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_HARDWARE_PHY_STATE: u32 = + NWF_DEFINE_OID!(144, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_DESIRED_PHY_LIST: u32 = + NWF_DEFINE_OID!(145, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_PHY_ID_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + dot11PhyId: [ULONG; 1], +}} +pub type PDOT11_PHY_ID_LIST = *mut DOT11_PHY_ID_LIST; +pub const DOT11_PHY_ID_LIST_REVISION_1: UCHAR = 1; +pub const DOT11_PHY_ID_ANY: ULONG = 0xffffffff; +pub const OID_DOT11_CURRENT_PHY_ID: u32 = + NWF_DEFINE_OID!(146, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_MEDIA_STREAMING_ENABLED: u32 = + NWF_DEFINE_OID!(147, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_UNREACHABLE_DETECTION_THRESHOLD: u32 = + NWF_DEFINE_OID!(148, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_ACTIVE_PHY_LIST: u32 = + NWF_DEFINE_OID!(149, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_EXTSTA_CAPABILITY: u32 = + NWF_DEFINE_OID!(150, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_EXTSTA_CAPABILITY { + Header: NDIS_OBJECT_HEADER, + uScanSSIDListSize: ULONG, + uDesiredBSSIDListSize: ULONG, + uDesiredSSIDListSize: ULONG, + uExcludedMacAddressListSize: ULONG, + uPrivacyExemptionListSize: ULONG, + uKeyMappingTableSize: ULONG, + uDefaultKeyTableSize: ULONG, + uWEPKeyValueMaxLength: ULONG, + uPMKIDCacheSize: ULONG, + uMaxNumPerSTADefaultKeyTables: ULONG, +}} +pub type PDOT11_EXTSTA_CAPABILITY = *mut DOT11_EXTSTA_CAPABILITY; +pub const DOT11_EXTSTA_CAPABILITY_REVISION_1: UCHAR = 1; +pub const OID_DOT11_DATA_RATE_MAPPING_TABLE: u32 = + NWF_DEFINE_OID!(151, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_DATA_RATE_MAPPING_ENTRY { + ucDataRateIndex: UCHAR, + ucDataRateFlag: UCHAR, + usDataRateValue: USHORT, +}} +pub type PDOT11_DATA_RATE_MAPPING_ENTRY = *mut DOT11_DATA_RATE_MAPPING_ENTRY; +STRUCT!{struct DOT11_DATA_RATE_MAPPING_TABLE { + Header: NDIS_OBJECT_HEADER, + uDataRateMappingLength: ULONG, + DataRateMappingEntries: [DOT11_DATA_RATE_MAPPING_ENTRY; DOT11_RATE_SET_MAX_LENGTH], 
+}} +pub type PDOT11_DATA_RATE_MAPPING_TABLE = *mut DOT11_DATA_RATE_MAPPING_TABLE; +pub const DOT11_DATA_RATE_MAPPING_TABLE_REVISION_1: UCHAR = 1; +pub const DOT11_DATA_RATE_NON_STANDARD: UCHAR = 0x01; +pub const DOT11_DATA_RATE_INDEX_MASK: UCHAR = 0x7f; +pub const OID_DOT11_SUPPORTED_COUNTRY_OR_REGION_STRING: u32 = + NWF_DEFINE_OID!(152, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_COUNTRY_OR_REGION_STRING_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + CountryOrRegionStrings: [DOT11_COUNTRY_OR_REGION_STRING; 1], +}} +pub type PDOT11_COUNTRY_OR_REGION_STRING_LIST = *mut DOT11_COUNTRY_OR_REGION_STRING_LIST; +pub const DOT11_COUNTRY_OR_REGION_STRING_LIST_REVISION_1: UCHAR = 1; +pub const OID_DOT11_DESIRED_COUNTRY_OR_REGION_STRING: u32 = + NWF_DEFINE_OID!(153, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_PORT_STATE_NOTIFICATION: u32 = + NWF_DEFINE_OID!(154, NWF_OPERATIONAL_OID, NWF_OPTIONAL_OID); +STRUCT!{struct DOT11_PORT_STATE_NOTIFICATION { + Header: NDIS_OBJECT_HEADER, + PeerMac: DOT11_MAC_ADDRESS, + bOpen: BOOLEAN, +}} +pub type PDOT11_PORT_STATE_NOTIFICATION = *mut DOT11_PORT_STATE_NOTIFICATION; +pub const DOT11_PORT_STATE_NOTIFICATION_REVISION_1: UCHAR = 1; +pub const OID_DOT11_IBSS_PARAMS: u32 = + NWF_DEFINE_OID!(155, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_IBSS_PARAMS { + Header: NDIS_OBJECT_HEADER, + bJoinOnly: BOOLEAN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_IBSS_PARAMS = *mut DOT11_IBSS_PARAMS; +pub const DOT11_IBSS_PARAMS_REVISION_1: UCHAR = 1; +pub const OID_DOT11_QOS_PARAMS: u32 = NWF_DEFINE_OID!(156, NWF_OPERATIONAL_OID, NWF_OPTIONAL_OID); +STRUCT!{struct DOT11_QOS_PARAMS { + Header: NDIS_OBJECT_HEADER, + ucEnabledQoSProtocolFlags: UCHAR, +}} +pub type PDOT11_QOS_PARAMS = *mut DOT11_QOS_PARAMS; +pub const DOT11_QOS_PARAMS_REVISION_1: UCHAR = 1; +pub const DOT11_QOS_PROTOCOL_FLAG_WMM: UCHAR = 0x01; +pub const DOT11_QOS_PROTOCOL_FLAG_11E: UCHAR = 0x02; +pub const OID_DOT11_SAFE_MODE_ENABLED: u32 = + NWF_DEFINE_OID!(157, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_HIDDEN_NETWORK_ENABLED: u32 = + NWF_DEFINE_OID!(158, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_ASSOCIATION_PARAMS: u32 = + NWF_DEFINE_OID!(159, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_ASSOCIATION_PARAMS { + Header: NDIS_OBJECT_HEADER, + BSSID: DOT11_MAC_ADDRESS, + uAssocRequestIEsOffset: ULONG, + uAssocRequestIEsLength: ULONG, +}} +pub type PDOT11_ASSOCIATION_PARAMS = *mut DOT11_ASSOCIATION_PARAMS; +pub const DOT11_ASSOCIATION_PARAMS_REVISION_1: UCHAR = 1; +pub const OID_DOT11_SAFE_MODE_HT_ENABLED: u32 = + NWF_DEFINE_OID!(160, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); +pub const DOT11_MAX_NUM_OF_FRAGMENTS: USHORT = 16; +pub const DOT11_PRIORITY_CONTENTION: i32 = 0; +pub const DOT11_PRIORITY_CONTENTION_FREE: i32 = 1; +pub const DOT11_SERVICE_CLASS_REORDERABLE_MULTICAST: i32 = 0; +pub const DOT11_SERVICE_CLASS_STRICTLY_ORDERED: i32 = 1; +pub const DOT11_FLAGS_80211B_SHORT_PREAMBLE: u32 = 0x00000001; +pub const DOT11_FLAGS_80211B_PBCC: u32 = 0x00000002; +pub const DOT11_FLAGS_80211B_CHANNEL_AGILITY: u32 = 0x00000004; +pub const DOT11_FLAGS_PS_ON: u32 = 0x00000008; +pub const DOT11_FLAGS_80211G_DSSS_OFDM: u32 = 0x00000010; +pub const DOT11_FLAGS_80211G_USE_PROTECTION: u32 = 0x00000020; +pub const DOT11_FLAGS_80211G_NON_ERP_PRESENT: u32 = 0x00000040; +pub const DOT11_FLAGS_80211G_BARKER_PREAMBLE_MODE: u32 = 0x00000080; +pub const 
DOT11_WME_PACKET: u32 = 0x00000100; +STRUCT!{struct DOT11_FRAGMENT_DESCRIPTOR { + uOffset: ULONG, + uLength: ULONG, +}} +pub type PDOT11_FRAGMENT_DESCRIPTOR = *mut DOT11_FRAGMENT_DESCRIPTOR; +STRUCT!{struct DOT11_PER_MSDU_COUNTERS { + uTransmittedFragmentCount: ULONG, + uRetryCount: ULONG, + uRTSSuccessCount: ULONG, + uRTSFailureCount: ULONG, + uACKFailureCount: ULONG, +}} +pub type PDOT11_PER_MSDU_COUNTERS = *mut DOT11_PER_MSDU_COUNTERS; +STRUCT!{struct DOT11_HRDSSS_PHY_ATTRIBUTES { + bShortPreambleOptionImplemented: BOOLEAN, + bPBCCOptionImplemented: BOOLEAN, + bChannelAgilityPresent: BOOLEAN, + uHRCCAModeSupported: ULONG, +}} +pub type PDOT11_HRDSSS_PHY_ATTRIBUTES = *mut DOT11_HRDSSS_PHY_ATTRIBUTES; +STRUCT!{struct DOT11_OFDM_PHY_ATTRIBUTES { + uFrequencyBandsSupported: ULONG, +}} +pub type PDOT11_OFDM_PHY_ATTRIBUTES = *mut DOT11_OFDM_PHY_ATTRIBUTES; +STRUCT!{struct DOT11_ERP_PHY_ATTRIBUTES { + HRDSSSAttributes: DOT11_HRDSSS_PHY_ATTRIBUTES, + bERPPBCCOptionImplemented: BOOLEAN, + bDSSSOFDMOptionImplemented: BOOLEAN, + bShortSlotTimeOptionImplemented: BOOLEAN, +}} +pub type PDOT11_ERP_PHY_ATTRIBUTES = *mut DOT11_ERP_PHY_ATTRIBUTES; +pub const DOT11_PHY_ATTRIBUTES_REVISION_1: UCHAR = 1; +UNION!{union DOT11_PHY_ATTRIBUTES_u { + [u32; 3], + HRDSSSAttributes HRDSSSAttributes_mut: DOT11_HRDSSS_PHY_ATTRIBUTES, + OFDMAttributes OFDMAttributes_mut: DOT11_OFDM_PHY_ATTRIBUTES, + ERPAttributes ERPAttributes_mut: DOT11_ERP_PHY_ATTRIBUTES, +}} +STRUCT!{struct DOT11_PHY_ATTRIBUTES { + Header: NDIS_OBJECT_HEADER, + PhyType: DOT11_PHY_TYPE, + bHardwarePhyState: BOOLEAN, + bSoftwarePhyState: BOOLEAN, + bCFPollable: BOOLEAN, + uMPDUMaxLength: ULONG, + TempType: DOT11_TEMP_TYPE, + DiversitySupport: DOT11_DIVERSITY_SUPPORT, + u: DOT11_PHY_ATTRIBUTES_u, + uNumberSupportedPowerLevels: ULONG, + TxPowerLevels: [ULONG; 8], + uNumDataRateMappingEntries: ULONG, + DataRateMappingEntries: [DOT11_DATA_RATE_MAPPING_ENTRY; DOT11_RATE_SET_MAX_LENGTH], + SupportedDataRatesValue: DOT11_SUPPORTED_DATA_RATES_VALUE_V2, +}} +pub type PDOT11_PHY_ATTRIBUTES = *mut DOT11_PHY_ATTRIBUTES; +pub const DOT11_EXTSTA_ATTRIBUTES_SAFEMODE_OID_SUPPORTED: UINT8 = 0x1; +pub const DOT11_EXTSTA_ATTRIBUTES_SAFEMODE_CERTIFIED: UINT8 = 0x2; +pub const DOT11_EXTSTA_ATTRIBUTES_SAFEMODE_RESERVED: UINT8 = 0xC; +pub const DOT11_EXTSTA_ATTRIBUTES_REVISION_1: UCHAR = 1; +pub const DOT11_EXTSTA_ATTRIBUTES_REVISION_2: UCHAR = 2; +pub const DOT11_EXTSTA_ATTRIBUTES_REVISION_3: UCHAR = 3; +pub const DOT11_EXTSTA_ATTRIBUTES_REVISION_4: UCHAR = 4; +STRUCT!{struct DOT11_EXTSTA_ATTRIBUTES { + Header: NDIS_OBJECT_HEADER, + uScanSSIDListSize: ULONG, + uDesiredBSSIDListSize: ULONG, + uDesiredSSIDListSize: ULONG, + uExcludedMacAddressListSize: ULONG, + uPrivacyExemptionListSize: ULONG, + uKeyMappingTableSize: ULONG, + uDefaultKeyTableSize: ULONG, + uWEPKeyValueMaxLength: ULONG, + uPMKIDCacheSize: ULONG, + uMaxNumPerSTADefaultKeyTables: ULONG, + bStrictlyOrderedServiceClassImplemented: BOOLEAN, + ucSupportedQoSProtocolFlags: UCHAR, + bSafeModeImplemented: BOOLEAN, + uNumSupportedCountryOrRegionStrings: ULONG, + pSupportedCountryOrRegionStrings: PDOT11_COUNTRY_OR_REGION_STRING, + uInfraNumSupportedUcastAlgoPairs: ULONG, + pInfraSupportedUcastAlgoPairs: PDOT11_AUTH_CIPHER_PAIR, + uInfraNumSupportedMcastAlgoPairs: ULONG, + pInfraSupportedMcastAlgoPairs: PDOT11_AUTH_CIPHER_PAIR, + uAdhocNumSupportedUcastAlgoPairs: ULONG, + pAdhocSupportedUcastAlgoPairs: PDOT11_AUTH_CIPHER_PAIR, + uAdhocNumSupportedMcastAlgoPairs: ULONG, + pAdhocSupportedMcastAlgoPairs: 
PDOT11_AUTH_CIPHER_PAIR, + bAutoPowerSaveMode: BOOLEAN, + uMaxNetworkOffloadListSize: ULONG, + bMFPCapable: BOOLEAN, + uInfraNumSupportedMcastMgmtAlgoPairs: ULONG, + pInfraSupportedMcastMgmtAlgoPairs: PDOT11_AUTH_CIPHER_PAIR, + bNeighborReportSupported: BOOLEAN, + bAPChannelReportSupported: BOOLEAN, + bActionFramesSupported: BOOLEAN, + bANQPQueryOffloadSupported: BOOLEAN, + bHESSIDConnectionSupported: BOOLEAN, +}} +pub type PDOT11_EXTSTA_ATTRIBUTES = *mut DOT11_EXTSTA_ATTRIBUTES; +STRUCT!{struct DOT11_RECV_EXTENSION_INFO { + uVersion: ULONG, + pvReserved: PVOID, + dot11PhyType: DOT11_PHY_TYPE, + uChCenterFrequency: ULONG, + lRSSI: LONG, + lRSSIMin: LONG, + lRSSIMax: LONG, + uRSSI: ULONG, + ucPriority: UCHAR, + ucDataRate: UCHAR, + ucPeerMacAddress: [UCHAR; 6], + dwExtendedStatus: ULONG, + hWEPOffloadContext: HANDLE, + hAuthOffloadContext: HANDLE, + usWEPAppliedMask: USHORT, + usWPAMSDUPriority: USHORT, + dot11LowestIV48Counter: DOT11_IV48_COUNTER, + usDot11LeftRWBitMap: USHORT, + dot11HighestIV48Counter: DOT11_IV48_COUNTER, + usDot11RightRWBitMap: USHORT, + usNumberOfMPDUsReceived: USHORT, + usNumberOfFragments: USHORT, + pNdisPackets: [PVOID; 1], +}} +pub type PDOT11_RECV_EXTENSION_INFO = *mut DOT11_RECV_EXTENSION_INFO; +STRUCT!{struct DOT11_RECV_EXTENSION_INFO_V2 { + uVersion: ULONG, + pvReserved: PVOID, + dot11PhyType: DOT11_PHY_TYPE, + uChCenterFrequency: ULONG, + lRSSI: LONG, + uRSSI: ULONG, + ucPriority: UCHAR, + ucDataRate: UCHAR, + ucPeerMacAddress: [UCHAR; 6], + dwExtendedStatus: ULONG, + hWEPOffloadContext: HANDLE, + hAuthOffloadContext: HANDLE, + usWEPAppliedMask: USHORT, + usWPAMSDUPriority: USHORT, + dot11LowestIV48Counter: DOT11_IV48_COUNTER, + usDot11LeftRWBitMap: USHORT, + dot11HighestIV48Counter: DOT11_IV48_COUNTER, + usDot11RightRWBitMap: USHORT, + usNumberOfMPDUsReceived: USHORT, + usNumberOfFragments: USHORT, + pNdisPackets: [PVOID; 1], +}} +pub type PDOT11_RECV_EXTENSION_INFO_V2 = *mut DOT11_RECV_EXTENSION_INFO_V2; +pub const DOT11_STATUS_SUCCESS: NDIS_STATUS = 0x00000001; +pub const DOT11_STATUS_RETRY_LIMIT_EXCEEDED: NDIS_STATUS = 0x00000002; +pub const DOT11_STATUS_UNSUPPORTED_PRIORITY: NDIS_STATUS = 0x00000004; +pub const DOT11_STATUS_UNSUPPORTED_SERVICE_CLASS: NDIS_STATUS = 0x00000008; +pub const DOT11_STATUS_UNAVAILABLE_PRIORITY: NDIS_STATUS = 0x00000010; +pub const DOT11_STATUS_UNAVAILABLE_SERVICE_CLASS: NDIS_STATUS = 0x00000020; +pub const DOT11_STATUS_XMIT_MSDU_TIMER_EXPIRED: NDIS_STATUS = 0x00000040; +pub const DOT11_STATUS_UNAVAILABLE_BSS: NDIS_STATUS = 0x00000080; +pub const DOT11_STATUS_EXCESSIVE_DATA_LENGTH: NDIS_STATUS = 0x00000100; +pub const DOT11_STATUS_ENCRYPTION_FAILED: NDIS_STATUS = 0x00000200; +pub const DOT11_STATUS_WEP_KEY_UNAVAILABLE: NDIS_STATUS = 0x00000400; +pub const DOT11_STATUS_ICV_VERIFIED: NDIS_STATUS = 0x00000800; +pub const DOT11_STATUS_PACKET_REASSEMBLED: NDIS_STATUS = 0x00001000; +pub const DOT11_STATUS_PACKET_NOT_REASSEMBLED: NDIS_STATUS = 0x00002000; +pub const DOT11_STATUS_GENERATE_AUTH_FAILED: NDIS_STATUS = 0x00004000; +pub const DOT11_STATUS_AUTH_NOT_VERIFIED: NDIS_STATUS = 0x00008000; +pub const DOT11_STATUS_AUTH_VERIFIED: NDIS_STATUS = 0x00010000; +pub const DOT11_STATUS_AUTH_FAILED: NDIS_STATUS = 0x00020000; +pub const DOT11_STATUS_PS_LIFETIME_EXPIRED: NDIS_STATUS = 0x00040000; +STRUCT!{struct DOT11_STATUS_INDICATION { + uStatusType: ULONG, + ndisStatus: NDIS_STATUS, +}} +pub type PDOT11_STATUS_INDICATION = *mut DOT11_STATUS_INDICATION; +pub const DOT11_STATUS_RESET_CONFIRM: ULONG = 4; +pub const DOT11_STATUS_SCAN_CONFIRM: 
ULONG = 1; +pub const DOT11_STATUS_JOIN_CONFIRM: ULONG = 2; +pub const DOT11_STATUS_START_CONFIRM: ULONG = 3; +pub const DOT11_STATUS_AP_JOIN_CONFIRM: ULONG = 5; +pub const DOT11_STATUS_MPDU_MAX_LENGTH_CHANGED: ULONG = 6; +STRUCT!{struct DOT11_MPDU_MAX_LENGTH_INDICATION { + Header: NDIS_OBJECT_HEADER, + uPhyId: ULONG, + uMPDUMaxLength: ULONG, +}} +pub type PDOT11_MPDU_MAX_LENGTH_INDICATION = *mut DOT11_MPDU_MAX_LENGTH_INDICATION; +pub const DOT11_MPDU_MAX_LENGTH_INDICATION_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_ASSOCIATION_START_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + MacAddr: DOT11_MAC_ADDRESS, + SSID: DOT11_SSID, + uIHVDataOffset: ULONG, + uIHVDataSize: ULONG, +}} +pub type PDOT11_ASSOCIATION_START_PARAMETERS = *mut DOT11_ASSOCIATION_START_PARAMETERS; +pub const DOT11_ASSOCIATION_START_PARAMETERS_REVISION_1: UCHAR = 1; +pub const DOT11_ENCAP_RFC_1042: USHORT = 1; +pub const DOT11_ENCAP_802_1H: USHORT = 2; +STRUCT!{struct DOT11_ENCAP_ENTRY { + usEtherType: USHORT, + usEncapType: USHORT, +}} +pub type PDOT11_ENCAP_ENTRY = *mut DOT11_ENCAP_ENTRY; +ENUM!{enum DOT11_DS_INFO { + DOT11_DS_CHANGED = 0, + DOT11_DS_UNCHANGED = 1, + DOT11_DS_UNKNOWN = 2, +}} +pub type PDOT11_DS_INFO = *mut DOT11_DS_INFO; +pub type DOT11_ASSOC_STATUS = ULONG; +pub const DOT11_ASSOC_STATUS_SUCCESS: DOT11_ASSOC_STATUS = 0; +pub const DOT11_ASSOC_STATUS_FAILURE: DOT11_ASSOC_STATUS = 0x00000001; +pub const DOT11_ASSOC_STATUS_UNREACHABLE: DOT11_ASSOC_STATUS = 0x00000002; +pub const DOT11_ASSOC_STATUS_RADIO_OFF: DOT11_ASSOC_STATUS = 0x00000003; +pub const DOT11_ASSOC_STATUS_PHY_DISABLED: DOT11_ASSOC_STATUS = 0x00000004; +pub const DOT11_ASSOC_STATUS_CANCELLED: DOT11_ASSOC_STATUS = 0x00000005; +pub const DOT11_ASSOC_STATUS_CANDIDATE_LIST_EXHAUSTED: DOT11_ASSOC_STATUS = 0x00000006; +pub const DOT11_ASSOC_STATUS_DISASSOCIATED_BY_OS: DOT11_ASSOC_STATUS = 0x00000007; +pub const DOT11_ASSOC_STATUS_DISASSOCIATED_BY_ROAMING: DOT11_ASSOC_STATUS = 0x00000008; +pub const DOT11_ASSOC_STATUS_DISASSOCIATED_BY_RESET: DOT11_ASSOC_STATUS = 0x00000009; +pub const DOT11_ASSOC_STATUS_SYSTEM_ERROR: DOT11_ASSOC_STATUS = 0x0000000a; +pub const DOT11_ASSOC_STATUS_ROAMING_BETTER_AP_FOUND: DOT11_ASSOC_STATUS = 0x0000000b; +pub const DOT11_ASSOC_STATUS_ROAMING_ASSOCIATION_LOST: DOT11_ASSOC_STATUS = 0x0000000c; +pub const DOT11_ASSOC_STATUS_ROAMING_ADHOC: DOT11_ASSOC_STATUS = 0x0000000d; +pub const DOT11_ASSOC_STATUS_PEER_DEAUTHENTICATED: DOT11_ASSOC_STATUS = 0x00010000; +pub const DOT11_ASSOC_STATUS_PEER_DEAUTHENTICATED_START: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_PEER_DEAUTHENTICATED; +pub const DOT11_ASSOC_STATUS_PEER_DEAUTHENTICATED_END: DOT11_ASSOC_STATUS = 0x0001ffff; +pub const DOT11_ASSOC_STATUS_PEER_DISASSOCIATED: DOT11_ASSOC_STATUS = 0x00020000; +pub const DOT11_ASSOC_STATUS_PEER_DISASSOCIATED_START: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_PEER_DISASSOCIATED; +pub const DOT11_ASSOC_STATUS_PEER_DISASSOCIATED_END: DOT11_ASSOC_STATUS = 0x0002ffff; +pub const DOT11_ASSOC_STATUS_ASSOCIATION_RESPONSE: DOT11_ASSOC_STATUS = 0x00030000; +pub const DOT11_ASSOC_STATUS_ASSOCIATION_RESPONSE_START: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_ASSOCIATION_RESPONSE; +pub const DOT11_ASSOC_STATUS_ASSOCIATION_RESPONSE_END: DOT11_ASSOC_STATUS = 0x0003ffff; +pub const DOT11_ASSOC_STATUS_REASON_CODE_MASK: DOT11_ASSOC_STATUS = 0xffff; +pub const DOT11_ASSOC_STATUS_IHV_START: DOT11_ASSOC_STATUS = 0x80000000; +pub const DOT11_ASSOC_STATUS_IHV_END: DOT11_ASSOC_STATUS = 0xffffffff; +STRUCT!{struct DOT11_ASSOCIATION_COMPLETION_PARAMETERS { + Header: 
NDIS_OBJECT_HEADER, + MacAddr: DOT11_MAC_ADDRESS, + uStatus: DOT11_ASSOC_STATUS, + bReAssocReq: BOOLEAN, + bReAssocResp: BOOLEAN, + uAssocReqOffset: ULONG, + uAssocReqSize: ULONG, + uAssocRespOffset: ULONG, + uAssocRespSize: ULONG, + uBeaconOffset: ULONG, + uBeaconSize: ULONG, + uIHVDataOffset: ULONG, + uIHVDataSize: ULONG, + AuthAlgo: DOT11_AUTH_ALGORITHM, + UnicastCipher: DOT11_CIPHER_ALGORITHM, + MulticastCipher: DOT11_CIPHER_ALGORITHM, + uActivePhyListOffset: ULONG, + uActivePhyListSize: ULONG, + bFourAddressSupported: BOOLEAN, + bPortAuthorized: BOOLEAN, + ucActiveQoSProtocol: UCHAR, + DSInfo: DOT11_DS_INFO, + uEncapTableOffset: ULONG, + uEncapTableSize: ULONG, + MulticastMgmtCipher: DOT11_CIPHER_ALGORITHM, + uAssocComebackTime: ULONG, +}} +pub type PDOT11_ASSOCIATION_COMPLETION_PARAMETERS = *mut DOT11_ASSOCIATION_COMPLETION_PARAMETERS; +pub const DOT11_ASSOCIATION_COMPLETION_PARAMETERS_REVISION_1: UCHAR = 1; +pub const DOT11_ASSOCIATION_COMPLETION_PARAMETERS_REVISION_2: UCHAR = 2; +STRUCT!{struct DOT11_CONNECTION_START_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + BSSType: DOT11_BSS_TYPE, + AdhocBSSID: DOT11_MAC_ADDRESS, + AdhocSSID: DOT11_SSID, +}} +pub type PDOT11_CONNECTION_START_PARAMETERS = *mut DOT11_CONNECTION_START_PARAMETERS; +pub const DOT11_CONNECTION_START_PARAMETERS_REVISION_1: UCHAR = 1; +pub const DOT11_CONNECTION_STATUS_SUCCESS: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_SUCCESS; +pub const DOT11_CONNECTION_STATUS_FAILURE: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_FAILURE; +pub const DOT11_CONNECTION_STATUS_CANDIDATE_LIST_EXHAUSTED: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_CANDIDATE_LIST_EXHAUSTED; +pub const DOT11_CONNECTION_STATUS_PHY_POWER_DOWN: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_RADIO_OFF; +pub const DOT11_CONNECTION_STATUS_CANCELLED: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_CANCELLED; +pub const DOT11_CONNECTION_STATUS_IHV_START: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_IHV_START; +pub const DOT11_CONNECTION_STATUS_IHV_END: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_IHV_END; +STRUCT!{struct DOT11_CONNECTION_COMPLETION_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + uStatus: DOT11_ASSOC_STATUS, +}} +pub type PDOT11_CONNECTION_COMPLETION_PARAMETERS = *mut DOT11_CONNECTION_COMPLETION_PARAMETERS; +pub const DOT11_CONNECTION_COMPLETION_PARAMETERS_REVISION_1: UCHAR = 1; +pub const DOT11_ROAMING_REASON_BETTER_AP_FOUND: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_ROAMING_BETTER_AP_FOUND; +pub const DOT11_ROAMING_REASON_ASSOCIATION_LOST: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_ROAMING_ASSOCIATION_LOST; +pub const DOT11_ROAMING_REASON_ADHOC: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_ROAMING_ADHOC; +pub const DOT11_ROAMING_REASON_IHV_START: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_IHV_START; +pub const DOT11_ROAMING_REASON_IHV_END: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_IHV_END; +STRUCT!{struct DOT11_ROAMING_START_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + AdhocBSSID: DOT11_MAC_ADDRESS, + AdhocSSID: DOT11_SSID, + uRoamingReason: DOT11_ASSOC_STATUS, +}} +pub type PDOT11_ROAMING_START_PARAMETERS = *mut DOT11_ROAMING_START_PARAMETERS; +pub const DOT11_ROAMING_START_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_ROAMING_COMPLETION_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + uStatus: DOT11_ASSOC_STATUS, +}} +pub type PDOT11_ROAMING_COMPLETION_PARAMETERS = *mut DOT11_ROAMING_COMPLETION_PARAMETERS; +pub const DOT11_ROAMING_COMPLETION_PARAMETERS_REVISION_1: UCHAR = 1; +pub const DOT11_DISASSOC_REASON_OS: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_DISASSOCIATED_BY_OS; +pub const 
DOT11_DISASSOC_REASON_PEER_UNREACHABLE: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_UNREACHABLE; +pub const DOT11_DISASSOC_REASON_PEER_DEAUTHENTICATED: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_PEER_DEAUTHENTICATED; +pub const DOT11_DISASSOC_REASON_PEER_DISASSOCIATED: DOT11_ASSOC_STATUS = + DOT11_ASSOC_STATUS_PEER_DISASSOCIATED; +pub const DOT11_DISASSOC_REASON_RADIO_OFF: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_RADIO_OFF; +pub const DOT11_DISASSOC_REASON_PHY_DISABLED: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_PHY_DISABLED; +pub const DOT11_DISASSOC_REASON_IHV_START: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_IHV_START; +pub const DOT11_DISASSOC_REASON_IHV_END: DOT11_ASSOC_STATUS = DOT11_ASSOC_STATUS_IHV_END; +STRUCT!{struct DOT11_DISASSOCIATION_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + MacAddr: DOT11_MAC_ADDRESS, + uReason: DOT11_ASSOC_STATUS, + uIHVDataOffset: ULONG, + uIHVDataSize: ULONG, +}} +pub type PDOT11_DISASSOCIATION_PARAMETERS = *mut DOT11_DISASSOCIATION_PARAMETERS; +pub const DOT11_DISASSOCIATION_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_TKIPMIC_FAILURE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + bDefaultKeyFailure: BOOLEAN, + uKeyIndex: ULONG, + PeerMac: DOT11_MAC_ADDRESS, +}} +pub type PDOT11_TKIPMIC_FAILURE_PARAMETERS = *mut DOT11_TKIPMIC_FAILURE_PARAMETERS; +pub const DOT11_TKIPMIC_FAILURE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_PMKID_CANDIDATE_LIST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + uCandidateListSize: ULONG, + uCandidateListOffset: ULONG, +}} +pub type PDOT11_PMKID_CANDIDATE_LIST_PARAMETERS = *mut DOT11_PMKID_CANDIDATE_LIST_PARAMETERS; +pub const DOT11_PMKID_CANDIDATE_LIST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_BSSID_CANDIDATE { + BSSID: DOT11_MAC_ADDRESS, + uFlags: ULONG, +}} +pub type PDOT11_BSSID_CANDIDATE = *mut DOT11_BSSID_CANDIDATE; +pub const DOT11_PMKID_CANDIDATE_PREAUTH_ENABLED: ULONG = 0x00000001; +STRUCT!{struct DOT11_PHY_STATE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + uPhyId: ULONG, + bHardwarePhyState: BOOLEAN, + bSoftwarePhyState: BOOLEAN, +}} +pub type PDOT11_PHY_STATE_PARAMETERS = *mut DOT11_PHY_STATE_PARAMETERS; +pub const DOT11_PHY_STATE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_LINK_QUALITY_ENTRY { + PeerMacAddr: DOT11_MAC_ADDRESS, + ucLinkQuality: UCHAR, +}} +pub type PDOT11_LINK_QUALITY_ENTRY = *mut DOT11_LINK_QUALITY_ENTRY; +STRUCT!{struct DOT11_LINK_QUALITY_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + uLinkQualityListSize: ULONG, + uLinkQualityListOffset: ULONG, +}} +pub type PDOT11_LINK_QUALITY_PARAMETERS = *mut DOT11_LINK_QUALITY_PARAMETERS; +pub const DOT11_LINK_QUALITY_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_EXTSTA_SEND_CONTEXT { + Header: NDIS_OBJECT_HEADER, + usExemptionActionType: USHORT, + uPhyId: ULONG, + uDelayedSleepValue: ULONG, + pvMediaSpecificInfo: PVOID, + uSendFlags: ULONG, +}} +pub type PDOT11_EXTSTA_SEND_CONTEXT = *mut DOT11_EXTSTA_SEND_CONTEXT; +pub type DOT11_EXTAP_SEND_CONTEXT = DOT11_EXTSTA_SEND_CONTEXT; +pub type PDOT11_EXTAP_SEND_CONTEXT = *mut DOT11_EXTSTA_SEND_CONTEXT; +pub const DOT11_EXTSTA_SEND_CONTEXT_REVISION_1: UCHAR = 1; +pub const DOT11_RECV_FLAG_RAW_PACKET: ULONG = 0x00000001; +pub const DOT11_RECV_FLAG_RAW_PACKET_FCS_FAILURE: ULONG = 0x00000002; +pub const DOT11_RECV_FLAG_RAW_PACKET_TIMESTAMP: ULONG = 0x00000004; +pub const DOT11_EXTSTA_RECV_CONTEXT_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_EXTSTA_RECV_CONTEXT { + Header: NDIS_OBJECT_HEADER, + uReceiveFlags: ULONG, + uPhyId: ULONG, + uChCenterFrequency: ULONG, + 
usNumberOfMPDUsReceived: USHORT, + lRSSI: LONG, + ucDataRate: UCHAR, + uSizeMediaSpecificInfo: ULONG, + pvMediaSpecificInfo: PVOID, + ullTimestamp: ULONGLONG, +}} +pub type PDOT11_EXTSTA_RECV_CONTEXT = *mut DOT11_EXTSTA_RECV_CONTEXT; +pub type DOT11_EXTAP_RECV_CONTEXT = DOT11_EXTSTA_RECV_CONTEXT; +pub type PDOT11_EXTAP_RECV_CONTEXT = *mut DOT11_EXTSTA_RECV_CONTEXT; +pub const OID_DOT11_PRIVATE_OIDS_START: u32 = OID_DOT11_NDIS_START + 1024; +pub const OID_DOT11_CURRENT_ADDRESS: u32 = OID_DOT11_PRIVATE_OIDS_START + 2; +pub const OID_DOT11_PERMANENT_ADDRESS: u32 = OID_DOT11_PRIVATE_OIDS_START + 3; +pub const OID_DOT11_MULTICAST_LIST: u32 = OID_DOT11_PRIVATE_OIDS_START + 4; +pub const OID_DOT11_MAXIMUM_LIST_SIZE: u32 = OID_DOT11_PRIVATE_OIDS_START + 5; +macro_rules! DEFINE_NWF_GUID { + ($name:ident, $ord:expr) => { + DEFINE_GUID!{ + $name, 0x6cb9a43e + $ord, 0xc45f, 0x4039, 0x9f, 0xe6, 0xd0, 0x8c, 0xb0, 0x57, 0x18, + 0x4c + } + }; +} +DEFINE_NWF_GUID!{GUID_NWF_OFFLOAD_CAPABILITY, 0} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_OFFLOAD_CAPABILITY, 1} +DEFINE_NWF_GUID!{GUID_NWF_WEP_OFFLOAD, 2} +DEFINE_NWF_GUID!{GUID_NWF_WEP_UPLOAD, 3} +DEFINE_NWF_GUID!{GUID_NWF_DEFAULT_WEP_OFFLOAD, 4} +DEFINE_NWF_GUID!{GUID_NWF_DEFAULT_WEP_UPLOAD, 5} +DEFINE_NWF_GUID!{GUID_NWF_MPDU_MAX_LENGTH, 6} +DEFINE_NWF_GUID!{GUID_NWF_OPERATION_MODE_CAPABILITY, 7} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_OPERATION_MODE, 8} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_PACKET_FILTER, 9} +DEFINE_NWF_GUID!{GUID_NWF_ATIM_WINDOW, 10} +DEFINE_NWF_GUID!{GUID_NWF_SCAN_REQUEST, 11} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_PHY_TYPE, 12} +DEFINE_NWF_GUID!{GUID_NWF_JOIN_REQUEST, 13} +DEFINE_NWF_GUID!{GUID_NWF_START_REQUEST, 14} +DEFINE_NWF_GUID!{GUID_NWF_UPDATE_IE, 15} +DEFINE_NWF_GUID!{GUID_NWF_RESET_REQUEST, 16} +DEFINE_NWF_GUID!{GUID_NWF_NIC_POWER_STATE, 17} +DEFINE_NWF_GUID!{GUID_NWF_OPTIONAL_CAPABILITY, 18} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_OPTIONAL_CAPABILITY, 19} +DEFINE_NWF_GUID!{GUID_NWF_STATION_ID, 20} +DEFINE_NWF_GUID!{GUID_NWF_MEDIUM_OCCUPANCY_LIMIT, 21} +DEFINE_NWF_GUID!{GUID_NWF_CF_POLLABLE, 22} +DEFINE_NWF_GUID!{GUID_NWF_CFP_PERIOD, 23} +DEFINE_NWF_GUID!{GUID_NWF_CFP_MAX_DURATION, 24} +DEFINE_NWF_GUID!{GUID_NWF_POWER_MGMT_MODE, 25} +DEFINE_NWF_GUID!{GUID_NWF_OPERATIONAL_RATE_SET, 26} +DEFINE_NWF_GUID!{GUID_NWF_BEACON_PERIOD, 27} +DEFINE_NWF_GUID!{GUID_NWF_DTIM_PERIOD, 28} +DEFINE_NWF_GUID!{GUID_NWF_WEP_ICV_ERROR_COUNT, 29} +DEFINE_NWF_GUID!{GUID_NWF_MAC_ADDRESS, 30} +DEFINE_NWF_GUID!{GUID_NWF_RTS_THRESHOLD, 31} +DEFINE_NWF_GUID!{GUID_NWF_SHORT_RETRY_LIMIT, 32} +DEFINE_NWF_GUID!{GUID_NWF_LONG_RETRY_LIMIT, 33} +DEFINE_NWF_GUID!{GUID_NWF_FRAGMENTATION_THRESHOLD, 34} +DEFINE_NWF_GUID!{GUID_NWF_MAX_TRANSMIT_MSDU_LIFETIME, 35} +DEFINE_NWF_GUID!{GUID_NWF_MAX_RECEIVE_LIFETIME, 36} +DEFINE_NWF_GUID!{GUID_NWF_COUNTERS_ENTRY, 37} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_PHY_TYPES, 38} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_REG_DOMAIN, 39} +DEFINE_NWF_GUID!{GUID_NWF_TEMP_TYPE, 40} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_TX_ANTENNA, 41} +DEFINE_NWF_GUID!{GUID_NWF_DIVERSITY_SUPPORT, 42} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_RX_ANTENNA, 43} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_POWER_LEVELS, 44} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_TX_POWER_LEVEL, 45} +DEFINE_NWF_GUID!{GUID_NWF_HOP_TIME, 46} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_CHANNEL_NUMBER, 47} +DEFINE_NWF_GUID!{GUID_NWF_MAX_DWELL_TIME, 48} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_DWELL_TIME, 49} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_SET, 50} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_PATTERN, 51} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_INDEX, 52} 
+DEFINE_NWF_GUID!{GUID_NWF_CURRENT_CHANNEL, 53} +DEFINE_NWF_GUID!{GUID_NWF_CCA_MODE_SUPPORTED, 54} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_CCA_MODE, 55} +DEFINE_NWF_GUID!{GUID_NWF_ED_THRESHOLD, 56} +DEFINE_NWF_GUID!{GUID_NWF_CCA_WATCHDOG_TIMER_MAX, 57} +DEFINE_NWF_GUID!{GUID_NWF_CCA_WATCHDOG_COUNT_MAX, 58} +DEFINE_NWF_GUID!{GUID_NWF_CCA_WATCHDOG_TIMER_MIN, 59} +DEFINE_NWF_GUID!{GUID_NWF_CCA_WATCHDOG_COUNT_MIN, 60} +DEFINE_NWF_GUID!{GUID_NWF_REG_DOMAINS_SUPPORT_VALUE, 61} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_TX_ANTENNA, 62} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_RX_ANTENNA, 63} +DEFINE_NWF_GUID!{GUID_NWF_DIVERSITY_SELECTION_RX, 64} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_DATA_RATES_VALUE, 65} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_FREQUENCY, 66} +DEFINE_NWF_GUID!{GUID_NWF_TI_THRESHOLD, 67} +DEFINE_NWF_GUID!{GUID_NWF_FREQUENCY_BANDS_SUPPORTED, 68} +DEFINE_NWF_GUID!{GUID_NWF_SHORT_PREAMBLE_OPTION_IMPLEMENTED, 69} +DEFINE_NWF_GUID!{GUID_NWF_PBCC_OPTION_IMPLEMENTED, 70} +DEFINE_NWF_GUID!{GUID_NWF_CHANNEL_AGILITY_PRESENT, 71} +DEFINE_NWF_GUID!{GUID_NWF_CHANNEL_AGILITY_ENABLED, 72} +DEFINE_NWF_GUID!{GUID_NWF_HR_CCA_MODE_SUPPORTED, 73} +DEFINE_NWF_GUID!{GUID_NWF_MULTI_DOMAIN_CAPABILITY_IMPLEMENTED, 74} +DEFINE_NWF_GUID!{GUID_NWF_MULTI_DOMAIN_CAPABILITY_ENABLED, 75} +DEFINE_NWF_GUID!{GUID_NWF_COUNTRY_STRING, 76} +DEFINE_NWF_GUID!{GUID_NWF_MULTI_DOMAIN_CAPABILITY, 77} +DEFINE_NWF_GUID!{GUID_NWF_EHCC_PRIME_RADIX, 78} +DEFINE_NWF_GUID!{GUID_NWF_EHCC_NUMBER_OF_CHANNELS_FAMILY_INDEX, 79} +DEFINE_NWF_GUID!{GUID_NWF_EHCC_CAPABILITY_IMPLEMENTED, 80} +DEFINE_NWF_GUID!{GUID_NWF_EHCC_CAPABILITY_ENABLED, 81} +DEFINE_NWF_GUID!{GUID_NWF_HOP_ALGORITHM_ADOPTED, 82} +DEFINE_NWF_GUID!{GUID_NWF_RANDOM_TABLE_FLAG, 83} +DEFINE_NWF_GUID!{GUID_NWF_NUMBER_OF_HOPPING_SETS, 84} +DEFINE_NWF_GUID!{GUID_NWF_HOP_MODULUS, 85} +DEFINE_NWF_GUID!{GUID_NWF_HOP_OFFSET, 86} +DEFINE_NWF_GUID!{GUID_NWF_HOPPING_PATTERN, 87} +DEFINE_NWF_GUID!{GUID_NWF_RANDOM_TABLE_FIELD_NUMBER, 88} +DEFINE_NWF_GUID!{GUID_NWF_WPA_TSC, 89} +DEFINE_NWF_GUID!{GUID_NWF_RSSI_RANGE, 90} +DEFINE_NWF_GUID!{GUID_NWF_RF_USAGE, 91} +DEFINE_NWF_GUID!{GUID_NWF_NIC_SPECIFIC_EXTENSION, 92} +DEFINE_NWF_GUID!{GUID_NWF_AP_JOIN_REQUEST, 93} +DEFINE_NWF_GUID!{GUID_NWF_ERP_PBCC_OPTION_IMPLEMENTED, 94} +DEFINE_NWF_GUID!{GUID_NWF_ERP_PBCC_OPTION_ENABLED, 95} +DEFINE_NWF_GUID!{GUID_NWF_DSSS_OFDM_OPTION_IMPLEMENTED, 96} +DEFINE_NWF_GUID!{GUID_NWF_DSSS_OFDM_OPTION_ENABLED, 97} +DEFINE_NWF_GUID!{GUID_NWF_SHORT_SLOT_TIME_OPTION_IMPLEMENTED, 98} +DEFINE_NWF_GUID!{GUID_NWF_SHORT_SLOT_TIME_OPTION_ENABLED, 99} +DEFINE_NWF_GUID!{GUID_NWF_MAX_MAC_ADDRESS_STATES, 100} +DEFINE_NWF_GUID!{GUID_NWF_RECV_SENSITIVITY_LIST, 101} +DEFINE_NWF_GUID!{GUID_NWF_WME_IMPLEMENTED, 102} +DEFINE_NWF_GUID!{GUID_NWF_WME_ENABLED, 103} +DEFINE_NWF_GUID!{GUID_NWF_WME_AC_PARAMETERS, 104} +DEFINE_NWF_GUID!{GUID_NWF_WME_UPDATE_IE, 105} +DEFINE_NWF_GUID!{GUID_NWF_QOS_TX_QUEUES_SUPPORTED, 106} +DEFINE_NWF_GUID!{GUID_NWF_QOS_TX_DURATION, 107} +DEFINE_NWF_GUID!{GUID_NWF_QOS_TX_MEDIUM_TIME, 108} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_OFDM_FREQUENCY_LIST, 109} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_DSSS_CHANNEL_LIST, 110} +DEFINE_NWF_GUID!{GUID_NWF_AUTO_CONFIG_ENABLED, 120} +DEFINE_NWF_GUID!{GUID_NWF_ENUM_BSS_LIST, 121} +DEFINE_NWF_GUID!{GUID_NWF_FLUSH_BSS_LIST, 122} +DEFINE_NWF_GUID!{GUID_NWF_POWER_MGMT_REQUEST, 123} +DEFINE_NWF_GUID!{GUID_NWF_DESIRED_SSID_LIST, 124} +DEFINE_NWF_GUID!{GUID_NWF_EXCLUDED_MAC_ADDRESS_LIST, 125} +DEFINE_NWF_GUID!{GUID_NWF_DESIRED_BSSID_LIST, 126} +DEFINE_NWF_GUID!{GUID_NWF_DESIRED_BSS_TYPE, 127} 
+DEFINE_NWF_GUID!{GUID_NWF_PMKID_LIST, 128} +DEFINE_NWF_GUID!{GUID_NWF_CONNECT_REQUEST, 129} +DEFINE_NWF_GUID!{GUID_NWF_EXCLUDE_UNENCRYPTED, 130} +DEFINE_NWF_GUID!{GUID_NWF_STATISTICS, 131} +DEFINE_NWF_GUID!{GUID_NWF_PRIVACY_EXEMPTION_LIST, 132} +DEFINE_NWF_GUID!{GUID_NWF_ENABLED_AUTHENTICATION_ALGORITHM, 133} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_UNICAST_ALGORITHM_PAIR, 134} +DEFINE_NWF_GUID!{GUID_NWF_ENABLED_UNICAST_CIPHER_ALGORITHM, 135} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_MULTICAST_ALGORITHM_PAIR, 136} +DEFINE_NWF_GUID!{GUID_NWF_ENABLED_MULTICAST_CIPHER_ALGORITHM, 137} +DEFINE_NWF_GUID!{GUID_NWF_CIPHER_DEFAULT_KEY_ID, 138} +DEFINE_NWF_GUID!{GUID_NWF_CIPHER_DEFAULT_KEY, 139} +DEFINE_NWF_GUID!{GUID_NWF_CIPHER_KEY_MAPPING_KEY, 140} +DEFINE_NWF_GUID!{GUID_NWF_ENUM_ASSOCIATION_INFO, 141} +DEFINE_NWF_GUID!{GUID_NWF_DISCONNECT_REQUEST, 142} +DEFINE_NWF_GUID!{GUID_NWF_UNICAST_USE_GROUP_ENABLED, 143} +DEFINE_NWF_GUID!{GUID_NWF_PHY_STATE, 144} +DEFINE_NWF_GUID!{GUID_NWF_DESIRED_PHY_LIST, 145} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_PHY_ID, 146} +DEFINE_NWF_GUID!{GUID_NWF_MEDIA_STREAMING_ENABLED, 147} +DEFINE_NWF_GUID!{GUID_NWF_UNREACHABLE_DETECTION_THRESHOLD, 148} +DEFINE_NWF_GUID!{GUID_NWF_ACTIVE_PHY_LIST, 149} +DEFINE_NWF_GUID!{GUID_NWF_EXTSTA_CAPABILITY, 150} +DEFINE_NWF_GUID!{GUID_NWF_DATA_RATE_MAPPING_TABLE, 151} +DEFINE_NWF_GUID!{GUID_NWF_SUPPORTED_COUNTRY_OR_REGION_STRING, 152} +DEFINE_NWF_GUID!{GUID_NWF_DESIRED_COUNTRY_OR_REGION_STRING, 153} +DEFINE_NWF_GUID!{GUID_NWF_PORT_STATE_NOTIFICATION, 154} +DEFINE_NWF_GUID!{GUID_NWF_IBSS_PARAMS, 155} +DEFINE_NWF_GUID!{GUID_NWF_QOS_PARAMS, 156} +DEFINE_NWF_GUID!{GUID_NWF_SAFE_MODE_ENABLED, 157} +DEFINE_NWF_GUID!{GUID_NWF_HIDDEN_NETWORK_ENABLED, 158} +DEFINE_NWF_GUID!{GUID_NWF_ASSOCIATION_PARAMS, 159} +DEFINE_NWF_GUID!{GUID_NWF_CURRENT_ADDRESS, 1024 + 2} +DEFINE_NWF_GUID!{GUID_NWF_PERMANENT_ADDRESS, 1024 + 3} +DEFINE_NWF_GUID!{GUID_NWF_MULTICAST_LIST, 1024 + 4} +DEFINE_NWF_GUID!{GUID_NWF_MAXIMUM_LIST_SIZE, 1024 + 5} +pub const DOT11_EXTAP_ATTRIBUTES_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_EXTAP_ATTRIBUTES { + Header: NDIS_OBJECT_HEADER, + uScanSSIDListSize: ULONG, + uDesiredSSIDListSize: ULONG, + uPrivacyExemptionListSize: ULONG, + uAssociationTableSize: ULONG, + uDefaultKeyTableSize: ULONG, + uWEPKeyValueMaxLength: ULONG, + bStrictlyOrderedServiceClassImplemented: BOOLEAN, + uNumSupportedCountryOrRegionStrings: ULONG, + pSupportedCountryOrRegionStrings: PDOT11_COUNTRY_OR_REGION_STRING, + uInfraNumSupportedUcastAlgoPairs: ULONG, + pInfraSupportedUcastAlgoPairs: PDOT11_AUTH_CIPHER_PAIR, + uInfraNumSupportedMcastAlgoPairs: ULONG, + pInfraSupportedMcastAlgoPairs: PDOT11_AUTH_CIPHER_PAIR, +}} +pub type PDOT11_EXTAP_ATTRIBUTES = *mut DOT11_EXTAP_ATTRIBUTES; +pub const DOT11_INCOMING_ASSOC_STARTED_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_INCOMING_ASSOC_STARTED_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerMacAddr: DOT11_MAC_ADDRESS, +}} +pub type PDOT11_INCOMING_ASSOC_STARTED_PARAMETERS = *mut DOT11_INCOMING_ASSOC_STARTED_PARAMETERS; +pub const DOT11_INCOMING_ASSOC_REQUEST_RECEIVED_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_INCOMING_ASSOC_REQUEST_RECEIVED_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerMacAddr: DOT11_MAC_ADDRESS, + bReAssocReq: BOOLEAN, + uAssocReqOffset: ULONG, + uAssocReqSize: ULONG, +}} +pub type PDOT11_INCOMING_ASSOC_REQUEST_RECEIVED_PARAMETERS = + *mut DOT11_INCOMING_ASSOC_REQUEST_RECEIVED_PARAMETERS; +pub const DOT11_ASSOC_ERROR_SOURCE_OS: UCHAR = 0x0; +pub const DOT11_ASSOC_ERROR_SOURCE_REMOTE: 
UCHAR = 0x01; +pub const DOT11_ASSOC_ERROR_SOURCE_OTHER: UCHAR = 0xFF; +pub const DOT11_INCOMING_ASSOC_COMPLETION_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_INCOMING_ASSOC_COMPLETION_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerMacAddr: DOT11_MAC_ADDRESS, + uStatus: ULONG, + ucErrorSource: UCHAR, + bReAssocReq: BOOLEAN, + bReAssocResp: BOOLEAN, + uAssocReqOffset: ULONG, + uAssocReqSize: ULONG, + uAssocRespOffset: ULONG, + uAssocRespSize: ULONG, + AuthAlgo: DOT11_AUTH_ALGORITHM, + UnicastCipher: DOT11_CIPHER_ALGORITHM, + MulticastCipher: DOT11_CIPHER_ALGORITHM, + uActivePhyListOffset: ULONG, + uActivePhyListSize: ULONG, + uBeaconOffset: ULONG, + uBeaconSize: ULONG, +}} +pub type PDOT11_INCOMING_ASSOC_COMPLETION_PARAMETERS = + *mut DOT11_INCOMING_ASSOC_COMPLETION_PARAMETERS; +pub const DOT11_STOP_AP_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_STOP_AP_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ulReason: ULONG, +}} +pub type PDOT11_STOP_AP_PARAMETERS = *mut DOT11_STOP_AP_PARAMETERS; +pub const DOT11_STOP_AP_REASON_FREQUENCY_NOT_AVAILABLE: ULONG = 0x1; +pub const DOT11_STOP_AP_REASON_CHANNEL_NOT_AVAILABLE: ULONG = 0x2; +pub const DOT11_STOP_AP_REASON_AP_ACTIVE: ULONG = 0x3; +pub const DOT11_STOP_AP_REASON_IHV_START: ULONG = 0xFF000000; +pub const DOT11_STOP_AP_REASON_IHV_END: ULONG = 0xFFFFFFFF; +pub const DOT11_PHY_FREQUENCY_ADOPTED_PARAMETERS_REVISION_1: UCHAR = 1; +UNION!{union DOT11_PHY_FREQUENCY_ADOPTED_PARAMETERS_u { + [u32; 1], + ulChannel ulChannel_mut: ULONG, + ulFrequency ulFrequency_mut: ULONG, +}} +STRUCT!{struct DOT11_PHY_FREQUENCY_ADOPTED_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ulPhyId: ULONG, + u: DOT11_PHY_FREQUENCY_ADOPTED_PARAMETERS_u, +}} +pub type PDOT11_PHY_FREQUENCY_ADOPTED_PARAMETERS = *mut DOT11_PHY_FREQUENCY_ADOPTED_PARAMETERS; +pub const DOT11_CAN_SUSTAIN_AP_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_CAN_SUSTAIN_AP_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ulReason: ULONG, +}} +pub type PDOT11_CAN_SUSTAIN_AP_PARAMETERS = *mut DOT11_CAN_SUSTAIN_AP_PARAMETERS; +pub const DOT11_CAN_SUSTAIN_AP_REASON_IHV_START: ULONG = 0xFF000000; +pub const DOT11_CAN_SUSTAIN_AP_REASON_IHV_END: ULONG = 0xFFFFFFFF; +pub const NWF_EXTAP_OID: u32 = 0x03; +pub const OID_DOT11_WPS_ENABLED: u32 = NWF_DEFINE_OID!(0x01, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_START_AP_REQUEST: u32 = + NWF_DEFINE_OID!(0x02, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_AVAILABLE_CHANNEL_LIST: u32 = + NWF_DEFINE_OID!(0x03, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const DOT11_AVAILABLE_CHANNEL_LIST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_AVAILABLE_CHANNEL_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + uChannelNumber: [ULONG; 1], +}} +pub type PDOT11_AVAILABLE_CHANNEL_LIST = *mut DOT11_AVAILABLE_CHANNEL_LIST; +pub const OID_DOT11_AVAILABLE_FREQUENCY_LIST: u32 = + NWF_DEFINE_OID!(0x04, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const DOT11_AVAILABLE_FREQUENCY_LIST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_AVAILABLE_FREQUENCY_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + uFrequencyValue: [ULONG; 1], +}} +pub type PDOT11_AVAILABLE_FREQUENCY_LIST = *mut DOT11_AVAILABLE_FREQUENCY_LIST; +pub const OID_DOT11_DISASSOCIATE_PEER_REQUEST: u32 = + NWF_DEFINE_OID!(0x05, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const DOT11_DISASSOCIATE_PEER_REQUEST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_DISASSOCIATE_PEER_REQUEST { + Header: NDIS_OBJECT_HEADER, + 
PeerMacAddr: DOT11_MAC_ADDRESS, + usReason: USHORT, +}} +pub type PDOT11_DISASSOCIATE_PEER_REQUEST = *mut DOT11_DISASSOCIATE_PEER_REQUEST; +pub const OID_DOT11_INCOMING_ASSOCIATION_DECISION: u32 = + NWF_DEFINE_OID!(0x06, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const DOT11_INCOMING_ASSOC_DECISION_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_INCOMING_ASSOC_DECISION { + Header: NDIS_OBJECT_HEADER, + PeerMacAddr: DOT11_MAC_ADDRESS, + bAccept: BOOLEAN, + usReasonCode: USHORT, + uAssocResponseIEsOffset: ULONG, + uAssocResponseIEsLength: ULONG, +}} +pub type PDOT11_INCOMING_ASSOC_DECISION = *mut DOT11_INCOMING_ASSOC_DECISION; +pub const DOT11_INCOMING_ASSOC_DECISION_REVISION_2: UCHAR = 2; +STRUCT!{struct DOT11_INCOMING_ASSOC_DECISION_V2 { + Header: NDIS_OBJECT_HEADER, + PeerMacAddr: DOT11_MAC_ADDRESS, + bAccept: BOOLEAN, + usReasonCode: USHORT, + uAssocResponseIEsOffset: ULONG, + uAssocResponseIEsLength: ULONG, + WFDStatus: DOT11_WFD_STATUS_CODE, +}} +pub type PDOT11_INCOMING_ASSOC_DECISION_V2 = *mut DOT11_INCOMING_ASSOC_DECISION_V2; +pub const OID_DOT11_ADDITIONAL_IE: u32 = NWF_DEFINE_OID!(0x07, NWF_EXTAP_OID, NWF_MANDATORY_OID); +pub const DOT11_ADDITIONAL_IE_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_ADDITIONAL_IE { + Header: NDIS_OBJECT_HEADER, + uBeaconIEsOffset: ULONG, + uBeaconIEsLength: ULONG, + uResponseIEsOffset: ULONG, + uResponseIEsLength: ULONG, +}} +pub type PDOT11_ADDITIONAL_IE = *mut DOT11_ADDITIONAL_IE; +pub const DOT11_EXTAP_SEND_CONTEXT_REVISION_1: UCHAR = 1; +pub const DOT11_EXTAP_RECV_CONTEXT_REVISION_1: UCHAR = 1; +pub const OID_DOT11_ENUM_PEER_INFO: u32 = NWF_DEFINE_OID!(0x08, NWF_EXTAP_OID, NWF_MANDATORY_OID); +STRUCT!{struct DOT11_PEER_STATISTICS { + ullDecryptSuccessCount: ULONGLONG, + ullDecryptFailureCount: ULONGLONG, + ullTxPacketSuccessCount: ULONGLONG, + ullTxPacketFailureCount: ULONGLONG, + ullRxPacketSuccessCount: ULONGLONG, + ullRxPacketFailureCount: ULONGLONG, +}} +pub type PDOT11_PEER_STATISTICS = *mut DOT11_PEER_STATISTICS; +STRUCT!{struct DOT11_PEER_INFO { + MacAddress: DOT11_MAC_ADDRESS, + usCapabilityInformation: USHORT, + AuthAlgo: DOT11_AUTH_ALGORITHM, + UnicastCipherAlgo: DOT11_CIPHER_ALGORITHM, + MulticastCipherAlgo: DOT11_CIPHER_ALGORITHM, + bWpsEnabled: BOOLEAN, + usListenInterval: USHORT, + ucSupportedRates: [UCHAR; MAX_NUM_SUPPORTED_RATES_V2], + usAssociationID: USHORT, + AssociationState: DOT11_ASSOCIATION_STATE, + PowerMode: DOT11_POWER_MODE, + liAssociationUpTime: LARGE_INTEGER, + Statistics: DOT11_PEER_STATISTICS, +}} +pub type PDOT11_PEER_INFO = *mut DOT11_PEER_INFO; +pub const DOT11_PEER_INFO_LIST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_PEER_INFO_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + PeerInfo: [DOT11_PEER_INFO; 1], +}} +pub type PDOT11_PEER_INFO_LIST = *mut DOT11_PEER_INFO_LIST; +pub const DOT11_VWIFI_COMBINATION_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_VWIFI_COMBINATION { + Header: NDIS_OBJECT_HEADER, + uNumInfrastructure: ULONG, + uNumAdhoc: ULONG, + uNumSoftAP: ULONG, +}} +pub type PDOT11_VWIFI_COMBINATION = *mut DOT11_VWIFI_COMBINATION; +pub const DOT11_VWIFI_COMBINATION_REVISION_2: UCHAR = 2; +STRUCT!{struct DOT11_VWIFI_COMBINATION_V2 { + Header: NDIS_OBJECT_HEADER, + uNumInfrastructure: ULONG, + uNumAdhoc: ULONG, + uNumSoftAP: ULONG, + uNumVirtualStation: ULONG, +}} +pub type PDOT11_VWIFI_COMBINATION_V2 = *mut DOT11_VWIFI_COMBINATION_V2; +pub const DOT11_VWIFI_COMBINATION_REVISION_3: UCHAR = 3; +STRUCT!{struct DOT11_VWIFI_COMBINATION_V3 { + Header: NDIS_OBJECT_HEADER, + 
uNumInfrastructure: ULONG, + uNumAdhoc: ULONG, + uNumSoftAP: ULONG, + uNumVirtualStation: ULONG, + uNumWFDGroup: ULONG, +}} +pub type PDOT11_VWIFI_COMBINATION_V3 = *mut DOT11_VWIFI_COMBINATION_V3; +pub const DOT11_VWIFI_ATTRIBUTES_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_VWIFI_ATTRIBUTES { + Header: NDIS_OBJECT_HEADER, + uTotalNumOfEntries: ULONG, + Combinations: [DOT11_VWIFI_COMBINATION; 1], +}} +pub type PDOT11_VWIFI_ATTRIBUTES = *mut DOT11_VWIFI_ATTRIBUTES; +pub const NWF_VWIFI_OID: u32 = 0x04; +pub const OID_DOT11_CREATE_MAC: u32 = NWF_DEFINE_OID!(0x01, NWF_VWIFI_OID, NWF_MANDATORY_OID); +pub const DOT11_MAC_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_MAC_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + uOpmodeMask: ULONG, +}} +pub type PDOT11_MAC_PARAMETERS = *mut DOT11_MAC_PARAMETERS; +STRUCT!{struct DOT11_MAC_INFO { + uReserved: ULONG, + uNdisPortNumber: ULONG, + MacAddr: DOT11_MAC_ADDRESS, +}} +pub type PDOT11_MAC_INFO = *mut DOT11_MAC_INFO; +pub const OID_DOT11_DELETE_MAC: u32 = NWF_DEFINE_OID!(0x02, NWF_VWIFI_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_PREFERRED_MAC: u32 = NWF_DEFINE_OID!(0x03, NWF_VWIFI_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_VIRTUAL_STATION_CAPABILITY: u32 = + NWF_DEFINE_OID!(0x04, NWF_VWIFI_OID, NWF_OPTIONAL_OID); +pub const DOT11_WFD_ATTRIBUTES_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_ATTRIBUTES { + Header: NDIS_OBJECT_HEADER, + uNumConcurrentGORole: ULONG, + uNumConcurrentClientRole: ULONG, + WPSVersionsSupported: ULONG, + bServiceDiscoverySupported: BOOLEAN, + bClientDiscoverabilitySupported: BOOLEAN, + bInfrastructureManagementSupported: BOOLEAN, + uMaxSecondaryDeviceTypeListSize: ULONG, + DeviceAddress: DOT11_MAC_ADDRESS, + uInterfaceAddressListCount: ULONG, + pInterfaceAddressList: PDOT11_MAC_ADDRESS, + uNumSupportedCountryOrRegionStrings: ULONG, + pSupportedCountryOrRegionStrings: PDOT11_COUNTRY_OR_REGION_STRING, + uDiscoveryFilterListSize: ULONG, + uGORoleClientTableSize: ULONG, +}} +pub type PDOT11_WFD_ATTRIBUTES = *mut DOT11_WFD_ATTRIBUTES; +pub type DOT11_WFD_GROUP_CAPABILITY = UCHAR; +pub const DOT11_WFD_STATUS_SUCCESS: DOT11_WFD_STATUS_CODE = 0; +pub const DOT11_WFD_STATUS_FAILED_INFORMATION_IS_UNAVAILABLE: DOT11_WFD_STATUS_CODE = 1; +pub const DOT11_WFD_STATUS_FAILED_INCOMPATIBLE_PARAMETERS: DOT11_WFD_STATUS_CODE = 2; +pub const DOT11_WFD_STATUS_FAILED_LIMIT_REACHED: DOT11_WFD_STATUS_CODE = 3; +pub const DOT11_WFD_STATUS_FAILED_INVALID_PARAMETERS: DOT11_WFD_STATUS_CODE = 4; +pub const DOT11_WFD_STATUS_FAILED_UNABLE_TO_ACCOMODATE_REQUEST: DOT11_WFD_STATUS_CODE = 5; +pub const DOT11_WFD_STATUS_FAILED_PREVIOUS_PROTOCOL_ERROR: DOT11_WFD_STATUS_CODE = 6; +pub const DOT11_WFD_STATUS_FAILED_NO_COMMON_CHANNELS: DOT11_WFD_STATUS_CODE = 7; +pub const DOT11_WFD_STATUS_FAILED_UNKNOWN_WFD_GROUP: DOT11_WFD_STATUS_CODE = 8; +pub const DOT11_WFD_STATUS_FAILED_MATCHING_MAX_INTENT: DOT11_WFD_STATUS_CODE = 9; +pub const DOT11_WFD_STATUS_FAILED_INCOMPATIBLE_PROVISIONING_METHOD: DOT11_WFD_STATUS_CODE = 10; +pub const DOT11_WFD_STATUS_FAILED_REJECTED_BY_USER: DOT11_WFD_STATUS_CODE = 11; +pub const DOT11_WFD_STATUS_SUCCESS_ACCEPTED_BY_USER: DOT11_WFD_STATUS_CODE = 12; +#[inline] +pub fn WFD_STATUS_SUCCEEDED(status: DOT11_WFD_STATUS_CODE) -> bool { + status == DOT11_WFD_STATUS_SUCCESS || status == DOT11_WFD_STATUS_SUCCESS_ACCEPTED_BY_USER +} +#[inline] +pub fn WFD_STATUS_FAILED(status: DOT11_WFD_STATUS_CODE) -> bool { + status != DOT11_WFD_STATUS_SUCCESS && status != DOT11_WFD_STATUS_SUCCESS_ACCEPTED_BY_USER +} +pub const
DOT11_WFD_MINOR_REASON_SUCCESS: i32 = 0; +pub const DOT11_WFD_MINOR_REASON_DISASSOCIATED_FROM_WLAN_CROSS_CONNECTION_POLICY: i32 = 1; +pub const DOT11_WFD_MINOR_REASON_DISASSOCIATED_NOT_MANAGED_INFRASTRUCTURE_CAPABLE: i32 = 2; +pub const DOT11_WFD_MINOR_REASON_DISASSOCIATED_WFD_COEXISTENCE_POLICY: i32 = 3; +pub const DOT11_WFD_MINOR_REASON_DISASSOCIATED_INFRASTRUCTURE_MANAGED_POLICY: i32 = 4; +pub const DOT11_WPS_VERSION_1_0: u8 = 0x01; +pub const DOT11_WPS_VERSION_2_0: u8 = 0x02; +pub const DOT11_WFD_DEVICE_CAPABILITY_SERVICE_DISCOVERY: u8 = 0x01; +pub const DOT11_WFD_DEVICE_CAPABILITY_P2P_CLIENT_DISCOVERABILITY: u8 = 0x02; +pub const DOT11_WFD_DEVICE_CAPABILITY_CONCURRENT_OPERATION: u8 = 0x04; +pub const DOT11_WFD_DEVICE_CAPABILITY_P2P_INFRASTRUCTURE_MANAGED: u8 = 0x08; +pub const DOT11_WFD_DEVICE_CAPABILITY_P2P_DEVICE_LIMIT: u8 = 0x10; +pub const DOT11_WFD_DEVICE_CAPABILITY_P2P_INVITATION_PROCEDURE: u8 = 0x20; +pub const DOT11_WFD_DEVICE_CAPABILITY_RESERVED_6: u8 = 0x40; +pub const DOT11_WFD_DEVICE_CAPABILITY_RESERVED_7: u8 = 0x80; +pub const DOT11_WFD_GROUP_CAPABILITY_NONE: u8 = 0x00; +pub const DOT11_WFD_GROUP_CAPABILITY_GROUP_OWNER: u8 = 0x01; +pub const DOT11_WFD_GROUP_CAPABILITY_PERSISTENT_GROUP: u8 = 0x02; +pub const DOT11_WFD_GROUP_CAPABILITY_GROUP_LIMIT_REACHED: u8 = 0x04; +pub const DOT11_WFD_GROUP_CAPABILITY_INTRABSS_DISTRIBUTION_SUPPORTED: u8 = 0x08; +pub const DOT11_WFD_GROUP_CAPABILITY_CROSS_CONNECTION_SUPPORTED: u8 = 0x10; +pub const DOT11_WFD_GROUP_CAPABILITY_PERSISTENT_RECONNECT_SUPPORTED: u8 = 0x20; +pub const DOT11_WFD_GROUP_CAPABILITY_IN_GROUP_FORMATION: u8 = 0x40; +pub const DOT11_WFD_GROUP_CAPABILITY_RESERVED_7: u8 = 0x80; +pub const DOT11_WFD_GROUP_CAPABILITY_EAPOL_KEY_IP_ADDRESS_ALLOCATION_SUPPORTED: u8 = 0x80; +pub const DOT11_WPS_DEVICE_NAME_MAX_LENGTH: usize = 32; +pub const DOT11_WPS_MAX_PASSKEY_LENGTH: usize = 8; +pub const DOT11_WPS_MAX_MODEL_NAME_LENGTH: usize = 32; +pub const DOT11_WPS_MAX_MODEL_NUMBER_LENGTH: usize = 32; +STRUCT!{struct DOT11_WFD_DEVICE_TYPE { + CategoryID: USHORT, + SubCategoryID: USHORT, + OUI: [UCHAR; 4], +}} +pub type PDOT11_WFD_DEVICE_TYPE = *mut DOT11_WFD_DEVICE_TYPE; +STRUCT!{struct DOT11_WPS_DEVICE_NAME { + uDeviceNameLength: ULONG, + ucDeviceName: [UCHAR; DOT11_WPS_DEVICE_NAME_MAX_LENGTH], +}} +pub type PDOT11_WPS_DEVICE_NAME = *mut DOT11_WPS_DEVICE_NAME; +STRUCT!{struct DOT11_WFD_CONFIGURATION_TIMEOUT { + GOTimeout: UCHAR, + ClientTimeout: UCHAR, +}} +pub type PDOT11_WFD_CONFIGURATION_TIMEOUT = *mut DOT11_WFD_CONFIGURATION_TIMEOUT; +STRUCT!{struct DOT11_WFD_GROUP_ID { + DeviceAddress: DOT11_MAC_ADDRESS, + SSID: DOT11_SSID, +}} +pub type PDOT11_WFD_GROUP_ID = *mut DOT11_WFD_GROUP_ID; +STRUCT!{#[repr(packed)] struct DOT11_WFD_GO_INTENT { + Bitfields: UCHAR, +}} +BITFIELD!{DOT11_WFD_GO_INTENT Bitfields: UCHAR [ + TieBreaker set_TieBreaker[0..1], + Intent set_Intent[1..8], +]} +pub type PDOT11_WFD_GO_INTENT = *mut DOT11_WFD_GO_INTENT; +STRUCT!{struct DOT11_WFD_CHANNEL { + CountryRegionString: DOT11_COUNTRY_OR_REGION_STRING, + OperatingClass: UCHAR, + ChannelNumber: UCHAR, +}} +pub type PDOT11_WFD_CHANNEL = *mut DOT11_WFD_CHANNEL; +ENUM!{enum DOT11_WPS_CONFIG_METHOD { + DOT11_WPS_CONFIG_METHOD_NULL = 0, + DOT11_WPS_CONFIG_METHOD_DISPLAY = 0x0008, + DOT11_WPS_CONFIG_METHOD_NFC_TAG = 0x0020, + DOT11_WPS_CONFIG_METHOD_NFC_INTERFACE = 0x0040, + DOT11_WPS_CONFIG_METHOD_PUSHBUTTON = 0x0080, + DOT11_WPS_CONFIG_METHOD_KEYPAD = 0x0100, + DOT11_WPS_CONFIG_METHOD_WFDS_DEFAULT = 0x1000, +}} +pub type PDOT11_WPS_CONFIG_METHOD = *mut 
DOT11_WPS_CONFIG_METHOD; +ENUM!{enum DOT11_WPS_DEVICE_PASSWORD_ID { + DOT11_WPS_PASSWORD_ID_DEFAULT = 0x0000, + DOT11_WPS_PASSWORD_ID_USER_SPECIFIED = 0x0001, + DOT11_WPS_PASSWORD_ID_MACHINE_SPECIFIED = 0x0002, + DOT11_WPS_PASSWORD_ID_REKEY = 0x0003, + DOT11_WPS_PASSWORD_ID_PUSHBUTTON = 0x0004, + DOT11_WPS_PASSWORD_ID_REGISTRAR_SPECIFIED = 0x0005, + DOT11_WPS_PASSWORD_ID_NFC_CONNECTION_HANDOVER = 0x0007, + DOT11_WPS_PASSWORD_ID_WFD_SERVICES = 0x0008, + DOT11_WPS_PASSWORD_ID_OOB_RANGE_MIN = 0x0010, + DOT11_WPS_PASSWORD_ID_OOB_RANGE_MAX = 0xFFFF, +}} +pub type PDOT11_WPS_DEVICE_PASSWORD_ID = *mut DOT11_WPS_DEVICE_PASSWORD_ID; +STRUCT!{struct WFDSVC_CONNECTION_CAPABILITY { + bNew: BOOLEAN, + bClient: BOOLEAN, + bGO: BOOLEAN, +}} +pub type PWFDSVC_CONNECTION_CAPABILITY = *mut WFDSVC_CONNECTION_CAPABILITY; +pub const WFDSVC_CONNECTION_CAPABILITY_NEW: BOOLEAN = 0x01; +pub const WFDSVC_CONNECTION_CAPABILITY_CLIENT: BOOLEAN = 0x02; +pub const WFDSVC_CONNECTION_CAPABILITY_GO: BOOLEAN = 0x04; +STRUCT!{struct DOT11_WFD_SERVICE_HASH_LIST { + ServiceHashCount: USHORT, + ServiceHash: [DOT11_WFD_SERVICE_HASH; 1], +}} +pub type PDOT11_WFD_SERVICE_HASH_LIST = *mut DOT11_WFD_SERVICE_HASH_LIST; +STRUCT!{struct DOT11_WFD_ADVERTISEMENT_ID { + AdvertisementID: ULONG, + ServiceAddress: DOT11_MAC_ADDRESS, +}} +pub type PDOT11_WFD_ADVERTISEMENT_ID = *mut DOT11_WFD_ADVERTISEMENT_ID; +STRUCT!{struct DOT11_WFD_SESSION_ID { + SessionID: ULONG, + SessionAddress: DOT11_MAC_ADDRESS, +}} +pub type PDOT11_WFD_SESSION_ID = *mut DOT11_WFD_SESSION_ID; +STRUCT!{struct DOT11_WFD_ADVERTISED_SERVICE_DESCRIPTOR { + AdvertisementID: ULONG, + ConfigMethods: USHORT, + ServiceNameLength: UCHAR, + ServiceName: [UCHAR; DOT11_WFD_SERVICE_NAME_MAX_LENGTH], +}} +pub type PDOT11_WFD_ADVERTISED_SERVICE_DESCRIPTOR = *mut DOT11_WFD_ADVERTISED_SERVICE_DESCRIPTOR; +STRUCT!{struct DOT11_WFD_ADVERTISED_SERVICE_LIST { + ServiceCount: USHORT, + AdvertisedService: [DOT11_WFD_ADVERTISED_SERVICE_DESCRIPTOR; 1], +}} +pub type PDOT11_WFD_ADVERTISED_SERVICE_LIST = *mut DOT11_WFD_ADVERTISED_SERVICE_LIST; +pub const DOT11_WFD_DISCOVER_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +pub const DOT11_WFD_DISCOVER_COMPLETE_MAX_LIST_SIZE: ULONG = 128; +STRUCT!{struct DOT11_WFD_DISCOVER_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + Status: NDIS_STATUS, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + uListOffset: ULONG, + uListLength: ULONG, +}} +pub type PDOT11_WFD_DISCOVER_COMPLETE_PARAMETERS = *mut DOT11_WFD_DISCOVER_COMPLETE_PARAMETERS; +pub const DOT11_GO_NEGOTIATION_REQUEST_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_GO_NEGOTIATION_REQUEST_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_GO_NEGOTIATION_REQUEST_SEND_COMPLETE_PARAMETERS = + *mut DOT11_GO_NEGOTIATION_REQUEST_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_GO_NEGOTIATION_REQUEST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_GO_NEGOTIATION_REQUEST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + RequestContext: PVOID, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_GO_NEGOTIATION_REQUEST_PARAMETERS = + *mut DOT11_RECEIVED_GO_NEGOTIATION_REQUEST_PARAMETERS; +pub const DOT11_GO_NEGOTIATION_RESPONSE_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct 
DOT11_GO_NEGOTIATION_RESPONSE_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_GO_NEGOTIATION_RESPONSE_SEND_COMPLETE_PARAMETERS = + *mut DOT11_GO_NEGOTIATION_RESPONSE_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_GO_NEGOTIATION_RESPONSE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_GO_NEGOTIATION_RESPONSE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + ResponseContext: PVOID, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_GO_NEGOTIATION_RESPONSE_PARAMETERS = + *mut DOT11_RECEIVED_GO_NEGOTIATION_RESPONSE_PARAMETERS; +pub const DOT11_GO_NEGOTIATION_CONFIRMATION_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_GO_NEGOTIATION_CONFIRMATION_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_GO_NEGOTIATION_CONFIRMATION_SEND_COMPLETE_PARAMETERS = + *mut DOT11_GO_NEGOTIATION_CONFIRMATION_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_GO_NEGOTIATION_CONFIRMATION_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_GO_NEGOTIATION_CONFIRMATION_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_GO_NEGOTIATION_CONFIRMATION_PARAMETERS = + *mut DOT11_RECEIVED_GO_NEGOTIATION_CONFIRMATION_PARAMETERS; +pub const DOT11_INVITATION_REQUEST_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_INVITATION_REQUEST_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + ReceiverAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_INVITATION_REQUEST_SEND_COMPLETE_PARAMETERS = + *mut DOT11_INVITATION_REQUEST_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_INVITATION_REQUEST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_INVITATION_REQUEST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + TransmitterDeviceAddress: DOT11_MAC_ADDRESS, + BSSID: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + RequestContext: PVOID, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_INVITATION_REQUEST_PARAMETERS = + *mut DOT11_RECEIVED_INVITATION_REQUEST_PARAMETERS; +pub const DOT11_INVITATION_RESPONSE_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_INVITATION_RESPONSE_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ReceiverDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_INVITATION_RESPONSE_SEND_COMPLETE_PARAMETERS = + *mut DOT11_INVITATION_RESPONSE_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_INVITATION_RESPONSE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_INVITATION_RESPONSE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + TransmitterDeviceAddress: DOT11_MAC_ADDRESS, + BSSID: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_INVITATION_RESPONSE_PARAMETERS = + *mut 
DOT11_RECEIVED_INVITATION_RESPONSE_PARAMETERS; +pub const DOT11_PROVISION_DISCOVERY_REQUEST_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_PROVISION_DISCOVERY_REQUEST_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + ReceiverAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_PROVISION_DISCOVERY_REQUEST_SEND_COMPLETE_PARAMETERS = + *mut DOT11_PROVISION_DISCOVERY_REQUEST_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_PROVISION_DISCOVERY_REQUEST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_PROVISION_DISCOVERY_REQUEST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + TransmitterDeviceAddress: DOT11_MAC_ADDRESS, + BSSID: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + RequestContext: PVOID, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_PROVISION_DISCOVERY_REQUEST_PARAMETERS = + *mut DOT11_RECEIVED_PROVISION_DISCOVERY_REQUEST_PARAMETERS; +pub const DOT11_PROVISION_DISCOVERY_RESPONSE_SEND_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_PROVISION_DISCOVERY_RESPONSE_SEND_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ReceiverDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + Status: NDIS_STATUS, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_PROVISION_DISCOVERY_RESPONSE_SEND_COMPLETE_PARAMETERS = + *mut DOT11_PROVISION_DISCOVERY_RESPONSE_SEND_COMPLETE_PARAMETERS; +pub const DOT11_RECEIVED_PROVISION_DISCOVERY_RESPONSE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_RECEIVED_PROVISION_DISCOVERY_RESPONSE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + TransmitterDeviceAddress: DOT11_MAC_ADDRESS, + BSSID: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_RECEIVED_PROVISION_DISCOVERY_RESPONSE_PARAMETERS = + *mut DOT11_RECEIVED_PROVISION_DISCOVERY_RESPONSE_PARAMETERS; +pub const DOT11_ANQP_QUERY_COMPLETE_PARAMETERS_REVISION_1: UCHAR = 1; +ENUM!{enum DOT11_ANQP_QUERY_RESULT { + dot11_ANQP_query_result_success = 0, + dot11_ANQP_query_result_failure = 1, + dot11_ANQP_query_result_timed_out = 2, + dot11_ANQP_query_result_resources = 3, + dot11_ANQP_query_result_advertisement_protocol_not_supported_on_remote = 4, + dot11_ANQP_query_result_gas_protocol_failure = 5, + dot11_ANQP_query_result_advertisement_server_not_responding = 6, + dot11_ANQP_query_result_access_issues = 7, +}} +pub type PDOT11_ANQP_QUERY_RESULT = *mut DOT11_ANQP_QUERY_RESULT; +STRUCT!{struct DOT11_ANQP_QUERY_COMPLETE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + Status: DOT11_ANQP_QUERY_RESULT, + hContext: HANDLE, + uResponseLength: ULONG, +}} +pub type PDOT11_ANQP_QUERY_COMPLETE_PARAMETERS = *mut DOT11_ANQP_QUERY_COMPLETE_PARAMETERS; +pub const NWF_WFD_DEVICE_OID: u32 = 0x05; +pub const NWF_WFD_ROLE_OID: u32 = 0x06; +pub const OID_DOT11_WFD_DEVICE_CAPABILITY: u32 = + NWF_DEFINE_OID!(0x01, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_DEVICE_CAPABILITY_CONFIG_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_DEVICE_CAPABILITY_CONFIG { + Header: NDIS_OBJECT_HEADER, + bServiceDiscoveryEnabled: BOOLEAN, + bClientDiscoverabilityEnabled: BOOLEAN, + bConcurrentOperationSupported: BOOLEAN, + bInfrastructureManagementEnabled: BOOLEAN, + bDeviceLimitReached: BOOLEAN, + bInvitationProcedureEnabled: BOOLEAN, + WPSVersionsEnabled: ULONG, +}} +pub type PDOT11_WFD_DEVICE_CAPABILITY_CONFIG = 
*mut DOT11_WFD_DEVICE_CAPABILITY_CONFIG; +pub const OID_DOT11_WFD_GROUP_OWNER_CAPABILITY: u32 = + NWF_DEFINE_OID!(0x02, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG { + Header: NDIS_OBJECT_HEADER, + bPersistentGroupEnabled: BOOLEAN, + bIntraBSSDistributionSupported: BOOLEAN, + bCrossConnectionSupported: BOOLEAN, + bPersistentReconnectSupported: BOOLEAN, + bGroupFormationEnabled: BOOLEAN, + uMaximumGroupLimit: ULONG, +}} +pub type PDOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG = *mut DOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG; +pub const DOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG_REVISION_2: UCHAR = 2; +STRUCT!{struct DOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG_V2 { + Header: NDIS_OBJECT_HEADER, + bPersistentGroupEnabled: BOOLEAN, + bIntraBSSDistributionSupported: BOOLEAN, + bCrossConnectionSupported: BOOLEAN, + bPersistentReconnectSupported: BOOLEAN, + bGroupFormationEnabled: BOOLEAN, + uMaximumGroupLimit: ULONG, + bEapolKeyIpAddressAllocationSupported: BOOLEAN, +}} +pub type PDOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG_V2 = + *mut DOT11_WFD_GROUP_OWNER_CAPABILITY_CONFIG_V2; +pub const OID_DOT11_WFD_DEVICE_INFO: u32 = + NWF_DEFINE_OID!(0x03, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_DEVICE_INFO_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_DEVICE_INFO { + Header: NDIS_OBJECT_HEADER, + DeviceAddress: DOT11_MAC_ADDRESS, + ConfigMethods: USHORT, + PrimaryDeviceType: DOT11_WFD_DEVICE_TYPE, + DeviceName: DOT11_WPS_DEVICE_NAME, +}} +pub type PDOT11_WFD_DEVICE_INFO = *mut DOT11_WFD_DEVICE_INFO; +pub const OID_DOT11_WFD_SECONDARY_DEVICE_TYPE_LIST: u32 = + NWF_DEFINE_OID!(0x04, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_SECONDARY_DEVICE_TYPE_LIST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_SECONDARY_DEVICE_TYPE_LIST { + Header: NDIS_OBJECT_HEADER, + uNumOfEntries: ULONG, + uTotalNumOfEntries: ULONG, + SecondaryDeviceTypes: [DOT11_WFD_DEVICE_TYPE; 1], +}} +pub type PDOT11_WFD_SECONDARY_DEVICE_TYPE_LIST = *mut DOT11_WFD_SECONDARY_DEVICE_TYPE_LIST; +// pub const DOT11_SIZEOF_WFD_SECONDARY_DEVICE_TYPE_LIST_REVISION_1: usize = +// FIELD_OFFSET(DOT11_WFD_SECONDARY_DEVICE_TYPE_LIST, SecondaryDeviceTypes); +pub const OID_DOT11_WFD_DISCOVER_REQUEST: u32 = + NWF_DEFINE_OID!(0x05, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +ENUM!{enum DOT11_WFD_DISCOVER_TYPE { + dot11_wfd_discover_type_scan_only = 1, + dot11_wfd_discover_type_find_only = 2, + dot11_wfd_discover_type_auto = 3, + dot11_wfd_discover_type_scan_social_channels = 4, + dot11_wfd_discover_type_forced = 0x80000000, +}} +pub type PDOT11_WFD_DISCOVER_TYPE = *mut DOT11_WFD_DISCOVER_TYPE; +ENUM!{enum DOT11_WFD_SCAN_TYPE { + dot11_wfd_scan_type_active = 1, + dot11_wfd_scan_type_passive = 2, + dot11_wfd_scan_type_auto = 3, +}} +pub type PDOT11_WFD_SCAN_TYPE = *mut DOT11_WFD_SCAN_TYPE; +pub const DISCOVERY_FILTER_BITMASK_DEVICE: UCHAR = 0x1; +pub const DISCOVERY_FILTER_BITMASK_GO: UCHAR = 0x2; +pub const DISCOVERY_FILTER_BITMASK_ANY: UCHAR = 0xF; +STRUCT!{struct DOT11_WFD_DISCOVER_DEVICE_FILTER { + DeviceID: DOT11_MAC_ADDRESS, + ucBitmask: UCHAR, + GroupSSID: DOT11_SSID, +}} +pub type PDOT11_WFD_DISCOVER_DEVICE_FILTER = *mut DOT11_WFD_DISCOVER_DEVICE_FILTER; +pub const DOT11_WFD_DISCOVER_REQUEST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_DISCOVER_REQUEST { + Header: NDIS_OBJECT_HEADER, + DiscoverType: DOT11_WFD_DISCOVER_TYPE, + ScanType: DOT11_WFD_SCAN_TYPE, + uDiscoverTimeout: ULONG, + 
uDeviceFilterListOffset: ULONG, + uNumDeviceFilters: ULONG, + uIEsOffset: ULONG, + uIEsLength: ULONG, + bForceScanLegacyNetworks: BOOLEAN, +}} +pub type PDOT11_WFD_DISCOVER_REQUEST = *mut DOT11_WFD_DISCOVER_REQUEST; +pub const OID_DOT11_WFD_ENUM_DEVICE_LIST: u32 = + NWF_DEFINE_OID!(0x06, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_DEVICE_ENTRY_BYTE_ARRAY_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_DEVICE_ENTRY { + uPhyId: ULONG, + PhySpecificInfo: DOT11_BSS_ENTRY_PHY_SPECIFIC_INFO, + dot11BSSID: DOT11_MAC_ADDRESS, + dot11BSSType: DOT11_BSS_TYPE, + TransmitterAddress: DOT11_MAC_ADDRESS, + lRSSI: LONG, + uLinkQuality: ULONG, + usBeaconPeriod: USHORT, + ullTimestamp: ULONGLONG, + ullBeaconHostTimestamp: ULONGLONG, + ullProbeResponseHostTimestamp: ULONGLONG, + usCapabilityInformation: USHORT, + uBeaconIEsOffset: ULONG, + uBeaconIEsLength: ULONG, + uProbeResponseIEsOffset: ULONG, + uProbeResponseIEsLength: ULONG, +}} +pub type PDOT11_WFD_DEVICE_ENTRY = *mut DOT11_WFD_DEVICE_ENTRY; +pub const OID_DOT11_WFD_LISTEN_STATE_DISCOVERABILITY: u32 = + NWF_DEFINE_OID!(0x07, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_DEVICE_NOT_DISCOVERABLE: i32 = 0; +pub const DOT11_WFD_DEVICE_AUTO_AVAILABILITY: i32 = 16; +pub const DOT11_WFD_DEVICE_HIGH_AVAILABILITY: i32 = 24; +pub const OID_DOT11_WFD_ADDITIONAL_IE: u32 = + NWF_DEFINE_OID!(0x08, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_ADDITIONAL_IE_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_ADDITIONAL_IE { + Header: NDIS_OBJECT_HEADER, + uBeaconIEsOffset: ULONG, + uBeaconIEsLength: ULONG, + uProbeResponseIEsOffset: ULONG, + uProbeResponseIEsLength: ULONG, + uDefaultRequestIEsOffset: ULONG, + uDefaultRequestIEsLength: ULONG, +}} +pub type PDOT11_WFD_ADDITIONAL_IE = *mut DOT11_WFD_ADDITIONAL_IE; +pub const OID_DOT11_WFD_FLUSH_DEVICE_LIST: u32 = + NWF_DEFINE_OID!(0x09, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_SEND_GO_NEGOTIATION_REQUEST: u32 = + NWF_DEFINE_OID!(0x0A, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_SEND_GO_NEGOTIATION_REQUEST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_SEND_GO_NEGOTIATION_REQUEST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + uSendTimeout: ULONG, + GroupOwnerIntent: DOT11_WFD_GO_INTENT, + MinimumConfigTimeout: DOT11_WFD_CONFIGURATION_TIMEOUT, + IntendedInterfaceAddress: DOT11_MAC_ADDRESS, + GroupCapability: DOT11_WFD_GROUP_CAPABILITY, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_GO_NEGOTIATION_REQUEST_PARAMETERS = + *mut DOT11_SEND_GO_NEGOTIATION_REQUEST_PARAMETERS; +pub const OID_DOT11_WFD_SEND_GO_NEGOTIATION_RESPONSE: u32 = + NWF_DEFINE_OID!(0x0B, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_SEND_GO_NEGOTIATION_RESPONSE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_SEND_GO_NEGOTIATION_RESPONSE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + RequestContext: PVOID, + uSendTimeout: ULONG, + Status: DOT11_WFD_STATUS_CODE, + GroupOwnerIntent: DOT11_WFD_GO_INTENT, + MinimumConfigTimeout: DOT11_WFD_CONFIGURATION_TIMEOUT, + IntendedInterfaceAddress: DOT11_MAC_ADDRESS, + GroupCapability: DOT11_WFD_GROUP_CAPABILITY, + GroupID: DOT11_WFD_GROUP_ID, + bUseGroupID: BOOLEAN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_GO_NEGOTIATION_RESPONSE_PARAMETERS = + *mut DOT11_SEND_GO_NEGOTIATION_RESPONSE_PARAMETERS; +pub const 
OID_DOT11_WFD_SEND_GO_NEGOTIATION_CONFIRMATION: u32 = + NWF_DEFINE_OID!(0x0C, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_SEND_GO_NEGOTIATION_CONFIRMATION_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_SEND_GO_NEGOTIATION_CONFIRMATION_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + ResponseContext: PVOID, + uSendTimeout: ULONG, + Status: DOT11_WFD_STATUS_CODE, + GroupCapability: DOT11_WFD_GROUP_CAPABILITY, + GroupID: DOT11_WFD_GROUP_ID, + bUseGroupID: BOOLEAN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_GO_NEGOTIATION_CONFIRMATION_PARAMETERS = + *mut DOT11_SEND_GO_NEGOTIATION_CONFIRMATION_PARAMETERS; +pub const OID_DOT11_WFD_SEND_INVITATION_REQUEST: u32 = + NWF_DEFINE_OID!(0x0D, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +STRUCT!{#[repr(packed)] struct DOT11_WFD_INVITATION_FLAGS { + Bitfields: UCHAR, +}} +BITFIELD!{DOT11_WFD_INVITATION_FLAGS Bitfields: UCHAR [ + InvitationType set_InvitationType[0..1], + Reserved set_Reserved[1..8], +]} +pub type PDOT11_WFD_INVITATION_FLAGS = *mut DOT11_WFD_INVITATION_FLAGS; +pub const DOT11_SEND_INVITATION_REQUEST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_SEND_INVITATION_REQUEST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + DialogToken: DOT11_DIALOG_TOKEN, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + uSendTimeout: ULONG, + MinimumConfigTimeout: DOT11_WFD_CONFIGURATION_TIMEOUT, + InvitationFlags: DOT11_WFD_INVITATION_FLAGS, + GroupBSSID: DOT11_MAC_ADDRESS, + bUseGroupBSSID: BOOLEAN, + OperatingChannel: DOT11_WFD_CHANNEL, + bUseSpecifiedOperatingChannel: BOOLEAN, + GroupID: DOT11_WFD_GROUP_ID, + bLocalGO: BOOLEAN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_INVITATION_REQUEST_PARAMETERS = + *mut DOT11_SEND_INVITATION_REQUEST_PARAMETERS; +pub const OID_DOT11_WFD_SEND_INVITATION_RESPONSE: u32 = + NWF_DEFINE_OID!(0x0E, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_SEND_INVITATION_RESPONSE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_SEND_INVITATION_RESPONSE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ReceiverDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + RequestContext: PVOID, + uSendTimeout: ULONG, + Status: DOT11_WFD_STATUS_CODE, + MinimumConfigTimeout: DOT11_WFD_CONFIGURATION_TIMEOUT, + GroupBSSID: DOT11_MAC_ADDRESS, + bUseGroupBSSID: BOOLEAN, + OperatingChannel: DOT11_WFD_CHANNEL, + bUseSpecifiedOperatingChannel: BOOLEAN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_INVITATION_RESPONSE_PARAMETERS = + *mut DOT11_SEND_INVITATION_RESPONSE_PARAMETERS; +pub const OID_DOT11_WFD_SEND_PROVISION_DISCOVERY_REQUEST: u32 = + NWF_DEFINE_OID!(0x0F, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_SEND_PROVISION_DISCOVERY_REQUEST_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_SEND_PROVISION_DISCOVERY_REQUEST_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + DialogToken: DOT11_DIALOG_TOKEN, + PeerDeviceAddress: DOT11_MAC_ADDRESS, + uSendTimeout: ULONG, + GroupCapability: DOT11_WFD_GROUP_CAPABILITY, + GroupID: DOT11_WFD_GROUP_ID, + bUseGroupID: BOOLEAN, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_PROVISION_DISCOVERY_REQUEST_PARAMETERS = + *mut DOT11_SEND_PROVISION_DISCOVERY_REQUEST_PARAMETERS; +pub const OID_DOT11_WFD_SEND_PROVISION_DISCOVERY_RESPONSE: u32 = + NWF_DEFINE_OID!(0x10, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_SEND_PROVISION_DISCOVERY_RESPONSE_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct 
DOT11_SEND_PROVISION_DISCOVERY_RESPONSE_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + ReceiverDeviceAddress: DOT11_MAC_ADDRESS, + DialogToken: DOT11_DIALOG_TOKEN, + RequestContext: PVOID, + uSendTimeout: ULONG, + uIEsOffset: ULONG, + uIEsLength: ULONG, +}} +pub type PDOT11_SEND_PROVISION_DISCOVERY_RESPONSE_PARAMETERS = + *mut DOT11_SEND_PROVISION_DISCOVERY_RESPONSE_PARAMETERS; +pub const OID_DOT11_WFD_GET_DIALOG_TOKEN: u32 = + NWF_DEFINE_OID!(0x11, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_STOP_DISCOVERY: u32 = + NWF_DEFINE_OID!(0x12, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_ENABLE_HRDSSS_DEVICES: u32 = + NWF_DEFINE_OID!(0x13, NWF_WFD_DEVICE_OID, NWF_OPTIONAL_OID); +pub const OID_DOT11_WFD_DEVICE_LISTEN_CHANNEL: u32 = + NWF_DEFINE_OID!(0x14, NWF_WFD_DEVICE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_DEVICE_LISTEN_CHANNEL_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_DEVICE_LISTEN_CHANNEL { + Header: NDIS_OBJECT_HEADER, + ChannelNumber: UCHAR, +}} +pub type PDOT11_WFD_DEVICE_LISTEN_CHANNEL = *mut DOT11_WFD_DEVICE_LISTEN_CHANNEL; +pub const OID_DOT11_WFD_DESIRED_GROUP_ID: u32 = + NWF_DEFINE_OID!(0x01, NWF_WFD_ROLE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_START_GO_REQUEST: u32 = + NWF_DEFINE_OID!(0x02, NWF_WFD_ROLE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_GROUP_START_PARAMETERS: u32 = + NWF_DEFINE_OID!(0x03, NWF_WFD_ROLE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_GROUP_START_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_GROUP_START_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + AdvertisedOperatingChannel: DOT11_WFD_CHANNEL, +}} +pub type PDOT11_WFD_GROUP_START_PARAMETERS = *mut DOT11_WFD_GROUP_START_PARAMETERS; +pub const OID_DOT11_WFD_CONNECT_TO_GROUP_REQUEST: u32 = + NWF_DEFINE_OID!(0x04, NWF_WFD_ROLE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_DISCONNECT_FROM_GROUP_REQUEST: u32 = + NWF_DEFINE_OID!(0x05, NWF_WFD_ROLE_OID, NWF_MANDATORY_OID); +pub const OID_DOT11_WFD_GROUP_JOIN_PARAMETERS: u32 = + NWF_DEFINE_OID!(0x06, NWF_WFD_ROLE_OID, NWF_MANDATORY_OID); +pub const DOT11_WFD_GROUP_JOIN_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_WFD_GROUP_JOIN_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + GOOperatingChannel: DOT11_WFD_CHANNEL, + GOConfigTime: ULONG, + bInGroupFormation: BOOLEAN, + bWaitForWPSReady: BOOLEAN, +}} +pub type PDOT11_WFD_GROUP_JOIN_PARAMETERS = *mut DOT11_WFD_GROUP_JOIN_PARAMETERS; +pub const NWF_POWER_SAVE_OID: u32 = 0x07; +pub const OID_DOT11_POWER_MGMT_MODE_AUTO_ENABLED: u32 = + NWF_DEFINE_OID!(0x01, NWF_POWER_SAVE_OID, NWF_MANDATORY_OID); +pub const DOT11_POWER_MGMT_AUTO_MODE_ENABLED_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_POWER_MGMT_AUTO_MODE_ENABLED_INFO { + Header: NDIS_OBJECT_HEADER, + bEnabled: BOOLEAN, +}} +pub type PDOT11_POWER_MGMT_AUTO_MODE_ENABLED_INFO = *mut DOT11_POWER_MGMT_AUTO_MODE_ENABLED_INFO; +pub const OID_DOT11_POWER_MGMT_MODE_STATUS: u32 = + NWF_DEFINE_OID!(0x02, NWF_POWER_SAVE_OID, NWF_MANDATORY_OID); +ENUM!{enum DOT11_POWER_MODE_REASON { + dot11_power_mode_reason_no_change = 0, + dot11_power_mode_reason_noncompliant_AP = 1, + dot11_power_mode_reason_legacy_WFD_device = 2, + dot11_power_mode_reason_compliant_AP = 3, + dot11_power_mode_reason_compliant_WFD_device = 4, + dot11_power_mode_reason_others = 5, +}} +pub const DOT11_POWER_MGMT_MODE_STATUS_INFO_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_POWER_MGMT_MODE_STATUS_INFO { + Header: NDIS_OBJECT_HEADER, + PowerSaveMode: DOT11_POWER_MODE, + uPowerSaveLevel: ULONG, + Reason: DOT11_POWER_MODE_REASON, 
+}} +pub type PDOT11_POWER_MGMT_MODE_STATUSINFO = *mut DOT11_POWER_MGMT_MODE_STATUS_INFO; +pub const OID_DOT11_OFFLOAD_NETWORK_LIST: u32 = + NWF_DEFINE_OID!(0x03, NWF_POWER_SAVE_OID, NWF_MANDATORY_OID); +pub const DOT11_MAX_CHANNEL_HINTS: usize = 4; +pub const DOT11_INVALID_CHANNEL_NUMBER: ULONG = 0; +STRUCT!{struct DOT11_CHANNEL_HINT { + Dot11PhyType: DOT11_PHY_TYPE, + uChannelNumber: ULONG, +}} +pub type PDOT11_CHANNEL_HINT = *mut DOT11_CHANNEL_HINT; +STRUCT!{struct DOT11_OFFLOAD_NETWORK { + Ssid: DOT11_SSID, + UnicastCipher: DOT11_CIPHER_ALGORITHM, + AuthAlgo: DOT11_AUTH_ALGORITHM, + Dot11ChannelHints: [DOT11_CHANNEL_HINT; DOT11_MAX_CHANNEL_HINTS], +}} +pub type PDOT11_OFFLOAD_NETWORK = *mut DOT11_OFFLOAD_NETWORK; +pub const DOT11_NLO_FLAG_STOP_NLO_INDICATION: ULONG = 0x00000001; +pub const DOT11_NLO_FLAG_SCAN_ON_AOAC_PLATFORM: ULONG = 0x00000002; +pub const DOT11_NLO_FLAG_SCAN_AT_SYSTEM_RESUME: ULONG = 0x00000004; +pub const DOT11_OFFLOAD_NETWORK_LIST_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_OFFLOAD_NETWORK_LIST_INFO { + Header: NDIS_OBJECT_HEADER, + ulFlags: ULONG, + FastScanPeriod: ULONG, + FastScanIterations: ULONG, + SlowScanPeriod: ULONG, + uNumOfEntries: ULONG, + offloadNetworkList: [DOT11_OFFLOAD_NETWORK; 1], +}} +pub type PDOT11_OFFLOAD_NETWORK_LIST_INFO = *mut DOT11_OFFLOAD_NETWORK_LIST_INFO; +pub const DOT11_OFFLOAD_NETWORK_STATUS_PARAMETERS_REVISION_1: UCHAR = 1; +STRUCT!{struct DOT11_OFFLOAD_NETWORK_STATUS_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + Status: NDIS_STATUS, +}} +pub type PDOT11_OFFLOAD_NETWORK_STATUS_PARAMETERS = *mut DOT11_OFFLOAD_NETWORK_STATUS_PARAMETERS; +pub const NWF_MANUFACTURING_OID: u32 = 0x08; +pub const OID_DOT11_MANUFACTURING_TEST: u32 = + NWF_DEFINE_OID!(0x01, NWF_MANUFACTURING_OID, NWF_OPTIONAL_OID); +pub const DOT11_MANUFACTURING_TEST_REVISION_1: UCHAR = 1; +ENUM!{enum DOT11_MANUFACTURING_TEST_TYPE { + dot11_manufacturing_test_unknown = 0, + dot11_manufacturing_test_self_start = 1, + dot11_manufacturing_test_self_query_result = 2, + dot11_manufacturing_test_rx = 3, + dot11_manufacturing_test_tx = 4, + dot11_manufacturing_test_query_adc = 5, + dot11_manufacturing_test_set_data = 6, + dot11_manufacturing_test_query_data = 7, + dot11_manufacturing_test_sleep = 8, + dot11_manufacturing_test_awake = 9, + dot11_manufacturing_test_IHV_start = 0x80000000, + dot11_manufacturing_test_IHV_end = 0xffffffff, +}} +pub type PDOT11_MANUFACTURING_TEST_TYPE = *mut DOT11_MANUFACTURING_TEST_TYPE; +STRUCT!{struct DOT11_MANUFACTURING_TEST { + dot11ManufacturingTestType: DOT11_MANUFACTURING_TEST_TYPE, + uBufferLength: ULONG, + ucBuffer: [UCHAR; 1], +}} +pub type PDOT11_MANUFACTURING_TEST = *mut DOT11_MANUFACTURING_TEST; +ENUM!{enum DOT11_MANUFACTURING_SELF_TEST_TYPE { + DOT11_MANUFACTURING_SELF_TEST_TYPE_INTERFACE = 1, + DOT11_MANUFACTURING_SELF_TEST_TYPE_RF_INTERFACE = 2, + DOT11_MANUFACTURING_SELF_TEST_TYPE_BT_COEXISTENCE = 3, +}} +pub type PDOT11_MANUFACTURING_SELF_TEST_TYPE = *mut DOT11_MANUFACTURING_SELF_TEST_TYPE; +STRUCT!{struct DOT11_MANUFACTURING_SELF_TEST_SET_PARAMS { + SelfTestType: DOT11_MANUFACTURING_SELF_TEST_TYPE, + uTestID: ULONG, + uPinBitMask: ULONG, + pvContext: PVOID, + uBufferLength: ULONG, + ucBufferIn: [UCHAR; 1], +}} +pub type PDOT11_MANUFACTURING_SELF_TEST_SET_PARAMS = + *mut DOT11_MANUFACTURING_SELF_TEST_SET_PARAMS; +STRUCT!{struct DOT11_MANUFACTURING_SELF_TEST_QUERY_RESULTS { + SelfTestType: DOT11_MANUFACTURING_SELF_TEST_TYPE, + uTestID: ULONG, + bResult: BOOLEAN, + uPinFailedBitMask: ULONG, + pvContext: PVOID, + uBytesWrittenOut: ULONG, 
+ ucBufferOut: [UCHAR; 1], +}} +pub type PDOT11_MANUFACTURING_SELF_TEST_QUERY_RESULTS = + *mut DOT11_MANUFACTURING_SELF_TEST_QUERY_RESULTS; +ENUM!{enum DOT11_BAND { + dot11_band_2p4g = 1, + dot11_band_4p9g = 2, + dot11_band_5g = 3, +}} +pub type PDOT11_BAND = *mut DOT11_BAND; +STRUCT!{struct DOT11_MANUFACTURING_FUNCTIONAL_TEST_RX { + bEnabled: BOOLEAN, + Dot11Band: DOT11_BAND, + uChannel: ULONG, + PowerLevel: LONG, +}} +pub type PDOT11_MANUFACTURING_FUNCTIONAL_TEST_RX = *mut DOT11_MANUFACTURING_FUNCTIONAL_TEST_RX; +STRUCT!{struct DOT11_MANUFACTURING_FUNCTIONAL_TEST_TX { + bEnable: BOOLEAN, + bOpenLoop: BOOLEAN, + Dot11Band: DOT11_BAND, + uChannel: ULONG, + uSetPowerLevel: ULONG, + ADCPowerLevel: LONG, +}} +pub type PDOT11_MANUFACTURING_FUNCTIONAL_TEST_TX = *mut DOT11_MANUFACTURING_FUNCTIONAL_TEST_TX; +STRUCT!{struct DOT11_MANUFACTURING_FUNCTIONAL_TEST_QUERY_ADC { + Dot11Band: DOT11_BAND, + uChannel: ULONG, + ADCPowerLevel: LONG, +}} +pub type PDOT11_MANUFACTURING_FUNCTIONAL_TEST_QUERY_ADC = + *mut DOT11_MANUFACTURING_FUNCTIONAL_TEST_QUERY_ADC; +STRUCT!{struct DOT11_MANUFACTURING_TEST_SET_DATA { + uKey: ULONG, + uOffset: ULONG, + uBufferLength: ULONG, + ucBufferIn: [UCHAR; 1], +}} +pub type PDOT11_MANUFACTURING_TEST_SET_DATA = *mut DOT11_MANUFACTURING_TEST_SET_DATA; +STRUCT!{struct DOT11_MANUFACTURING_TEST_QUERY_DATA { + uKey: ULONG, + uOffset: ULONG, + uBufferLength: ULONG, + uBytesRead: ULONG, + ucBufferOut: [UCHAR; 1], +}} +pub type PDOT11_MANUFACTURING_TEST_QUERY_DATA = *mut DOT11_MANUFACTURING_TEST_QUERY_DATA; +STRUCT!{struct DOT11_MANUFACTURING_TEST_SLEEP { + uSleepTime: ULONG, + pvContext: PVOID, +}} +pub type PDOT11_MANUFACTURING_TEST_SLEEP = *mut DOT11_MANUFACTURING_TEST_SLEEP; +ENUM!{enum DOT11_MANUFACTURING_CALLBACK_TYPE { + dot11_manufacturing_callback_unknown = 0, + dot11_manufacturing_callback_self_test_complete = 1, + dot11_manufacturing_callback_sleep_complete = 2, + dot11_manufacturing_callback_IHV_start = 0x80000000, + dot11_manufacturing_callback_IHV_end = 0xffffffff, +}} +pub type PDOT11_MANUFACTURING_CALLBACK_TYPE = *mut DOT11_MANUFACTURING_CALLBACK_TYPE; +STRUCT!{struct DOT11_MANUFACTURING_CALLBACK_PARAMETERS { + Header: NDIS_OBJECT_HEADER, + dot11ManufacturingCallbackType: DOT11_MANUFACTURING_CALLBACK_TYPE, + uStatus: ULONG, + pvContext: PVOID, +}} +pub type PDOT11_MANUFACTURING_CALLBACK_PARAMETERS = *mut DOT11_MANUFACTURING_CALLBACK_PARAMETERS; +pub const DOT11_MANUFACTURING_CALLBACK_REVISION_1: UCHAR = 1; +pub const OID_DOT11_SET_FT_REASSOCIATION_PARAMETERS: u32 = + NWF_DEFINE_OID!(0x168, NWF_OPERATIONAL_OID, NWF_MANDATORY_OID); diff -Nru cargo-0.44.1/vendor/winapi/src/shared/wlantypes.rs cargo-0.47.0/vendor/winapi/src/shared/wlantypes.rs --- cargo-0.44.1/vendor/winapi/src/shared/wlantypes.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/wlantypes.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,78 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Structures used to hold wlan information. 
+use shared::basetsd::UINT8; +use shared::minwindef::{UCHAR, ULONG, USHORT}; +ENUM!{enum DOT11_BSS_TYPE { + dot11_BSS_type_infrastructure = 1, + dot11_BSS_type_independent = 2, + dot11_BSS_type_any = 3, +}} +pub type PDOT11_BSS_TYPE = *mut DOT11_BSS_TYPE; +pub const DOT11_SSID_MAX_LENGTH: usize = 32; +STRUCT!{struct DOT11_SSID { + uSSIDLength: ULONG, + ucSSID: [UCHAR; DOT11_SSID_MAX_LENGTH], +}} +pub type PDOT11_SSID = *mut DOT11_SSID; +ENUM!{enum DOT11_AUTH_ALGORITHM { + DOT11_AUTH_ALGO_80211_OPEN = 1, + DOT11_AUTH_ALGO_80211_SHARED_KEY = 2, + DOT11_AUTH_ALGO_WPA = 3, + DOT11_AUTH_ALGO_WPA_PSK = 4, + DOT11_AUTH_ALGO_WPA_NONE = 5, + DOT11_AUTH_ALGO_RSNA = 6, + DOT11_AUTH_ALGO_RSNA_PSK = 7, + DOT11_AUTH_ALGO_IHV_START = 0x80000000, + DOT11_AUTH_ALGO_IHV_END = 0xffffffff, +}} +pub type PDOT11_AUTH_ALGORITHM = *mut DOT11_AUTH_ALGORITHM; +pub const DOT11_AUTH_ALGORITHM_OPEN_SYSTEM: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_80211_OPEN; +pub const DOT11_AUTH_ALGORITHM_SHARED_KEY: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_80211_SHARED_KEY; +pub const DOT11_AUTH_ALGORITHM_WPA: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_WPA; +pub const DOT11_AUTH_ALGORITHM_WPA_PSK: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_WPA_PSK; +pub const DOT11_AUTH_ALGORITHM_WPA_NONE: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_WPA_NONE; +pub const DOT11_AUTH_ALGORITHM_RSNA: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_RSNA; +pub const DOT11_AUTH_ALGORITHM_RSNA_PSK: DOT11_AUTH_ALGORITHM = DOT11_AUTH_ALGO_RSNA_PSK; +ENUM!{enum DOT11_CIPHER_ALGORITHM { + DOT11_CIPHER_ALGO_NONE = 0x00, + DOT11_CIPHER_ALGO_WEP40 = 0x01, + DOT11_CIPHER_ALGO_TKIP = 0x02, + DOT11_CIPHER_ALGO_CCMP = 0x04, + DOT11_CIPHER_ALGO_WEP104 = 0x05, + DOT11_CIPHER_ALGO_BIP = 0x06, + DOT11_CIPHER_ALGO_GCMP = 0x08, + DOT11_CIPHER_ALGO_WPA_USE_GROUP = 0x100, + DOT11_CIPHER_ALGO_WEP = 0x101, + DOT11_CIPHER_ALGO_IHV_START = 0x80000000, + DOT11_CIPHER_ALGO_IHV_END = 0xffffffff, +}} +pub type PDOT11_CIPHER_ALGORITHM = *mut DOT11_CIPHER_ALGORITHM; +STRUCT!{struct DOT11_AUTH_CIPHER_PAIR { + AuthAlgoId: DOT11_AUTH_ALGORITHM, + CipherAlgoId: DOT11_CIPHER_ALGORITHM, +}} +pub type PDOT11_AUTH_CIPHER_PAIR = *mut DOT11_AUTH_CIPHER_PAIR; +pub const DOT11_OI_MAX_LENGTH: usize = 5; +pub const DOT11_OI_MIN_LENGTH: usize = 3; +STRUCT!{struct DOT11_OI { + OILength: USHORT, + OI: [UCHAR; DOT11_OI_MAX_LENGTH], +}} +pub type PDOT11_OI = *mut DOT11_OI; +STRUCT!{struct DOT11_ACCESSNETWORKOPTIONS { + AccessNetworkType: UINT8, + Internet: UINT8, + ASRA: UINT8, + ESR: UINT8, + UESA: UINT8, +}} +pub type PDOT11_ACCESSNETWORKOPTIONS = *mut DOT11_ACCESSNETWORKOPTIONS; +STRUCT!{struct DOT11_VENUEINFO { + VenueGroup: UINT8, + VenueType: UINT8, +}} +pub type PDOT11_VENUEINFO = *mut DOT11_VENUEINFO; diff -Nru cargo-0.44.1/vendor/winapi/src/shared/ws2ipdef.rs cargo-0.47.0/vendor/winapi/src/shared/ws2ipdef.rs --- cargo-0.44.1/vendor/winapi/src/shared/ws2ipdef.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/shared/ws2ipdef.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,7 +8,7 @@ use shared::in6addr::IN6_ADDR; use shared::inaddr::IN_ADDR; use shared::minwindef::{ULONG, USHORT}; -use shared::ws2def::{ADDRESS_FAMILY, SCOPE_ID}; +use shared::ws2def::{ADDRESS_FAMILY, SCOPE_ID, SOCKADDR_IN}; pub const IFF_UP: ULONG = 0x00000001; pub const IFF_BROADCAST: ULONG = 0x00000002; pub const IFF_LOOPBACK: ULONG = 0x00000004; @@ -44,11 +44,31 @@ u: SOCKADDR_IN6_LH_u, }} pub type PSOCKADDR_IN6_LH = *mut SOCKADDR_IN6_LH; +pub type SOCKADDR_IN6 = SOCKADDR_IN6_LH; +pub type PSOCKADDR_IN6 = *mut SOCKADDR_IN6; 
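The DOT11_SSID type above is a counted, fixed-capacity buffer (a length plus 32 octets). As a hedged illustration only, not part of the vendored sources, a caller using the winapi crate (with its wlantypes module enabled) might fill one from a byte slice as sketched below; the make_ssid helper name is invented for the example.

use std::mem;
use winapi::shared::wlantypes::{DOT11_SSID, DOT11_SSID_MAX_LENGTH};

// Hypothetical helper for illustration: build a DOT11_SSID from raw bytes.
fn make_ssid(name: &[u8]) -> Option<DOT11_SSID> {
    if name.len() > DOT11_SSID_MAX_LENGTH {
        return None; // SSIDs are limited to 32 octets
    }
    // The struct is plain old data, so zero-initialising it is sound.
    let mut ssid: DOT11_SSID = unsafe { mem::zeroed() };
    ssid.uSSIDLength = name.len() as u32;
    ssid.ucSSID[..name.len()].copy_from_slice(name);
    Some(ssid)
}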
+STRUCT!{struct SOCKADDR_IN6_PAIR { + SourceAddress: PSOCKADDR_IN6, + DestinationAddress: PSOCKADDR_IN6, +}} +pub type PSOCKADDR_IN6_PAIR = *mut SOCKADDR_IN6_PAIR; +UNION!{union SOCKADDR_INET { + [u32; 7], + Ipv4 Ipv4_mut: SOCKADDR_IN, + Ipv6 Ipv6_mut: SOCKADDR_IN6, + si_family si_family_mut: ADDRESS_FAMILY, +}} +pub type PSOCKADDR_INET = *mut SOCKADDR_INET; STRUCT!{struct IP_MREQ { imr_multiaddr: IN_ADDR, imr_interface: IN_ADDR, }} pub type PIP_MREQ = *mut IP_MREQ; +STRUCT!{struct IP_MREQ_SOURCE { + imr_multiaddr: IN_ADDR, + imr_sourceaddr: IN_ADDR, + imr_interface: IN_ADDR, +}} +pub type PIP_MREQ_SOURCE = *mut IP_MREQ_SOURCE; pub const IPV6_HOPOPTS: c_int = 1; pub const IPV6_HDRINCL: c_int = 2; pub const IPV6_UNICAST_HOPS: c_int = 4; diff -Nru cargo-0.44.1/vendor/winapi/src/ucrt/corecrt.rs cargo-0.47.0/vendor/winapi/src/ucrt/corecrt.rs --- cargo-0.44.1/vendor/winapi/src/ucrt/corecrt.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/ucrt/corecrt.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,11 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +// #include +// #include +use ctypes::{__int64, c_long}; +pub type __time32_t = c_long; +pub type __time64_t = __int64; +pub type time_t = __time64_t; diff -Nru cargo-0.44.1/vendor/winapi/src/ucrt/mod.rs cargo-0.47.0/vendor/winapi/src/ucrt/mod.rs --- cargo-0.44.1/vendor/winapi/src/ucrt/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/ucrt/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,7 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +// Headers for the universal C Runtime +#[cfg(feature = "corecrt")] pub mod corecrt; diff -Nru cargo-0.44.1/vendor/winapi/src/um/adhoc.rs cargo-0.47.0/vendor/winapi/src/um/adhoc.rs --- cargo-0.44.1/vendor/winapi/src/um/adhoc.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/adhoc.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,220 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. 
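The SOCKADDR_INET union added above is normally read by checking si_family first and only then touching the matching variant. A minimal sketch, assuming the unsafe accessor methods that winapi's UNION! macro generates (such as si_family()) and the AF_* constants from ws2def; the describe helper is invented for the example.

use winapi::shared::ws2def::{AF_INET, AF_INET6};
use winapi::shared::ws2ipdef::SOCKADDR_INET;

// Hypothetical helper for illustration: classify a SOCKADDR_INET by family.
unsafe fn describe(addr: &SOCKADDR_INET) -> &'static str {
    match *addr.si_family() as i32 {
        AF_INET => "IPv4 socket address",
        AF_INET6 => "IPv6 socket address",
        _ => "unrecognised address family",
    }
}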
+use shared::guiddef::GUID; +use shared::minwindef::ULONG; +use um::unknwnbase::{IUnknown, IUnknownVtbl}; +use um::winnt::{BOOLEAN, HRESULT, LONG, LPCWSTR, LPWSTR}; +ENUM!{enum DOT11_ADHOC_CIPHER_ALGORITHM { + DOT11_ADHOC_CIPHER_ALGO_INVALID = -1i32 as u32, + DOT11_ADHOC_CIPHER_ALGO_NONE = 0, + DOT11_ADHOC_CIPHER_ALGO_CCMP = 0x4, + DOT11_ADHOC_CIPHER_ALGO_WEP = 0x101, +}} +ENUM!{enum DOT11_ADHOC_AUTH_ALGORITHM { + DOT11_ADHOC_AUTH_ALGO_INVALID = -1i32 as u32, + DOT11_ADHOC_AUTH_ALGO_80211_OPEN = 1, + DOT11_ADHOC_AUTH_ALGO_RSNA_PSK = 7, +}} +ENUM!{enum DOT11_ADHOC_NETWORK_CONNECTION_STATUS { + DOT11_ADHOC_NETWORK_CONNECTION_STATUS_INVALID = 0, + DOT11_ADHOC_NETWORK_CONNECTION_STATUS_DISCONNECTED = 11, + DOT11_ADHOC_NETWORK_CONNECTION_STATUS_CONNECTING = 12, + DOT11_ADHOC_NETWORK_CONNECTION_STATUS_CONNECTED = 13, + DOT11_ADHOC_NETWORK_CONNECTION_STATUS_FORMED = 14, +}} +ENUM!{enum DOT11_ADHOC_CONNECT_FAIL_REASON { + DOT11_ADHOC_CONNECT_FAIL_DOMAIN_MISMATCH = 0, + DOT11_ADHOC_CONNECT_FAIL_PASSPHRASE_MISMATCH = 1, + DOT11_ADHOC_CONNECT_FAIL_OTHER = 2, +}} +RIDL!{#[uuid(0x8f10cc26, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocManager(IDot11AdHocManagerVtbl): IUnknown(IUnknownVtbl) { + fn CreateNetwork( + Name: LPCWSTR, + Password: LPCWSTR, + GeographicalId: LONG, + pInterface: *mut IDot11AdHocInterface, + pSecurity: *mut IDot11AdHocSecuritySettings, + pContextGuid: *mut GUID, + pIAdHoc: *mut *mut IDot11AdHocNetwork, + ) -> HRESULT, + fn CommitCreatedNetwork( + pIAdHoc: *mut IDot11AdHocNetwork, + fSaveProfile: BOOLEAN, + fMakeSavedProfileUserSpecific: BOOLEAN, + ) -> HRESULT, + fn GetIEnumDot11AdHocNetworks( + pContextGuid: *mut GUID, + ppEnum: *mut *mut IEnumDot11AdHocNetworks, + ) -> HRESULT, + fn GetIEnumDot11AdHocInterfaces( + ppEnum: *mut *mut IEnumDot11AdHocInterfaces, + ) -> HRESULT, + fn GetNetwork( + NetworkSignature: *mut GUID, + pNetwork: *mut *mut IDot11AdHocNetwork, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc27, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocManagerNotificationSink(IDot11AdHocManagerNotificationSinkVtbl): + IUnknown(IUnknownVtbl) { + fn OnNetworkAdd( + pIAdHocNetwork: *mut IDot11AdHocNetwork, + ) -> HRESULT, + fn OnNetworkRemove( + Signature: *mut GUID, + ) -> HRESULT, + fn OnInterfaceAdd( + pIAdHocInterface: *mut IDot11AdHocInterface, + ) -> HRESULT, + fn OnInterfaceRemove( + Signature: *mut GUID, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc28, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IEnumDot11AdHocNetworks(IEnumDot11AdHocNetworksVtbl): IUnknown(IUnknownVtbl) { + fn Next( + cElt: ULONG, + rgElt: *mut *mut IDot11AdHocNetwork, + pcEltFetched: *mut ULONG, + ) -> HRESULT, + fn Skip( + cElt: ULONG, + ) -> HRESULT, + fn Reset() -> HRESULT, + fn Clone( + ppEnum: *mut *mut IEnumDot11AdHocNetworks, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc29, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocNetwork(IDot11AdHocNetworkVtbl): IUnknown(IUnknownVtbl) { + fn GetStatus( + eStatus: *mut DOT11_ADHOC_NETWORK_CONNECTION_STATUS, + ) -> HRESULT, + fn GetSSID( + ppszwSSID: *mut LPWSTR, + ) -> HRESULT, + fn HasProfile( + pf11d: *mut BOOLEAN, + ) -> HRESULT, + fn GetProfileName( + ppszwProfileName: *mut LPWSTR, + ) -> HRESULT, + fn DeleteProfile() -> HRESULT, + fn GetSignalQuality( + puStrengthValue: *mut ULONG, + puStrengthMax: *mut ULONG, + ) -> HRESULT, + fn GetSecuritySetting( + pAdHocSecuritySetting: *mut *mut 
IDot11AdHocSecuritySettings, + ) -> HRESULT, + fn GetContextGuid( + pContextGuid: *mut GUID, + ) -> HRESULT, + fn GetSignature( + pSignature: *mut GUID, + ) -> HRESULT, + fn GetInterface( + pAdHocInterface: *mut *mut IDot11AdHocInterface, + ) -> HRESULT, + fn Connect( + Passphrase: LPCWSTR, + GeographicalId: LONG, + fSaveProfile: BOOLEAN, + fMakeSavedProfileUserSpecific: BOOLEAN, + ) -> HRESULT, + fn Disconnect() -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc2a, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocNetworkNotificationSink(IDot11AdHocNetworkNotificationSinkVtbl): + IUnknown(IUnknownVtbl) { + fn OnStatusChange( + eStatus: DOT11_ADHOC_NETWORK_CONNECTION_STATUS, + ) -> HRESULT, + fn OnConnectFail( + eFailReason: DOT11_ADHOC_CONNECT_FAIL_REASON, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc2b, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocInterface(IDot11AdHocInterfaceVtbl): IUnknown(IUnknownVtbl) { + fn GetDeviceSignature( + pSignature: *mut GUID, + ) -> HRESULT, + fn GetFriendlyName( + ppszName: *mut LPWSTR, + ) -> HRESULT, + fn IsDot11d( + pf11d: *mut BOOLEAN, + ) -> HRESULT, + fn IsAdHocCapable( + pfAdHocCapable: *mut BOOLEAN, + ) -> HRESULT, + fn IsRadioOn( + pfIsRadioOn: *mut BOOLEAN, + ) -> HRESULT, + fn GetActiveNetwork( + ppNetwork: *mut *mut IDot11AdHocNetwork, + ) -> HRESULT, + fn GetIEnumSecuritySettings( + ppEnum: *mut *mut IEnumDot11AdHocSecuritySettings, + ) -> HRESULT, + fn GetIEnumDot11AdHocNetworks( + pFilterGuid: *mut GUID, + ppEnum: *mut *mut IEnumDot11AdHocNetworks, + ) -> HRESULT, + fn GetStatus( + pState: *mut DOT11_ADHOC_NETWORK_CONNECTION_STATUS, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc2c, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IEnumDot11AdHocInterfaces(IEnumDot11AdHocInterfacesVtbl): IUnknown(IUnknownVtbl) { + fn Next( + cElt: ULONG, + rgElt: *mut *mut IDot11AdHocInterface, + pcEltFetched: *mut ULONG, + ) -> HRESULT, + fn Skip( + cElt: ULONG, + ) -> HRESULT, + fn Reset() -> HRESULT, + fn Clone( + ppEnum: *mut *mut IEnumDot11AdHocInterfaces, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc2d, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IEnumDot11AdHocSecuritySettings(IEnumDot11AdHocSecuritySettingsVtbl): + IUnknown(IUnknownVtbl) { + fn Next( + cElt: ULONG, + rgElt: *mut *mut IDot11AdHocSecuritySettings, + pcEltFetched: *mut ULONG, + ) -> HRESULT, + fn Skip( + cElt: ULONG, + ) -> HRESULT, + fn Reset() -> HRESULT, + fn Clone( + ppEnum: *mut *mut IEnumDot11AdHocSecuritySettings, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc2e, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocSecuritySettings(IDot11AdHocSecuritySettingsVtbl): IUnknown(IUnknownVtbl) { + fn GetDot11AuthAlgorithm( + pAuth: *mut DOT11_ADHOC_AUTH_ALGORITHM, + ) -> HRESULT, + fn GetDot11CipherAlgorithm( + pCipher: *mut DOT11_ADHOC_CIPHER_ALGORITHM, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x8f10cc2f, 0xcf0d, 0x42a0, 0xac, 0xbe, 0xe2, 0xde, 0x70, 0x07, 0x38, 0x4d)] +interface IDot11AdHocInterfaceNotificationSink(IDot11AdHocInterfaceNotificationSinkVtbl): + IUnknown(IUnknownVtbl) { + fn OnConnectionStatusChange( + eStatus: DOT11_ADHOC_NETWORK_CONNECTION_STATUS, + ) -> HRESULT, +}} +RIDL!{#[uuid(0xdd06a84f, 0x83bd, 0x4d01, 0x8a, 0xb9, 0x23, 0x89, 0xfe, 0xa0, 0x86, 0x9e)] +class Dot11AdHocManager;} diff -Nru cargo-0.44.1/vendor/winapi/src/um/commctrl.rs cargo-0.47.0/vendor/winapi/src/um/commctrl.rs --- cargo-0.44.1/vendor/winapi/src/um/commctrl.rs 
2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/commctrl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1490,7 +1490,7 @@ uId: UINT_PTR, rect: RECT, hinst: HINSTANCE, - lpszText: LPSTR, + lpszText: LPWSTR, lParam: LPARAM, lpReserved: *mut c_void, }} diff -Nru cargo-0.44.1/vendor/winapi/src/um/d3d11.rs cargo-0.47.0/vendor/winapi/src/um/d3d11.rs --- cargo-0.44.1/vendor/winapi/src/um/d3d11.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/d3d11.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1852,6 +1852,7 @@ ppRenderTargetViews: *mut *mut ID3D11RenderTargetView, ppDepthStencilView: *mut *mut ID3D11DepthStencilView, UAVStartSlot: UINT, + NumUAVs: UINT, ppUnorderedAccessViews: *mut *mut ID3D11UnorderedAccessView, ) -> (), fn OMGetBlendState( @@ -2481,9 +2482,9 @@ ProcessIndex: UINT, }} ENUM!{enum D3D11_AUTHENTICATED_PROCESS_IDENTIFIER_TYPE { - DD3D11_PROCESSIDTYPE_UNKNOWN = 0, - DD3D11_PROCESSIDTYPE_DWM = 1, - DD3D11_PROCESSIDTYPE_HANDLE = 2, + D3D11_PROCESSIDTYPE_UNKNOWN = 0, + D3D11_PROCESSIDTYPE_DWM = 1, + D3D11_PROCESSIDTYPE_HANDLE = 2, }} STRUCT!{struct D3D11_AUTHENTICATED_QUERY_RESTRICTED_SHARED_RESOURCE_PROCESS_OUTPUT { Output: D3D11_AUTHENTICATED_QUERY_OUTPUT, diff -Nru cargo-0.44.1/vendor/winapi/src/um/d3d11tokenizedprogramformat.rs cargo-0.47.0/vendor/winapi/src/um/d3d11tokenizedprogramformat.rs --- cargo-0.44.1/vendor/winapi/src/um/d3d11tokenizedprogramformat.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/d3d11tokenizedprogramformat.rs 2020-10-01 21:38:28.000000000 +0000 @@ -17,20 +17,20 @@ pub const D3D10_SB_TOKENIZED_PROGRAM_TYPE_SHIFT: DWORD = 16; #[inline] pub fn DECODE_D3D10_SB_TOKENIZED_PROGRAM_TYPE(VerTok: DWORD) -> DWORD { - (((VerTok & D3D10_SB_TOKENIZED_PROGRAM_TYPE_MASK) >> D3D10_SB_TOKENIZED_PROGRAM_TYPE_SHIFT) - as D3D10_SB_TOKENIZED_PROGRAM_TYPE) + ((VerTok & D3D10_SB_TOKENIZED_PROGRAM_TYPE_MASK) >> D3D10_SB_TOKENIZED_PROGRAM_TYPE_SHIFT) + as D3D10_SB_TOKENIZED_PROGRAM_TYPE } pub const D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION_MASK: DWORD = 0x000000f0; pub const D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION_SHIFT: DWORD = 4; pub const D3D10_SB_TOKENIZED_PROGRAM_MINOR_VERSION_MASK: DWORD = 0x0000000f; #[inline] pub fn DECODE_D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION(VerTok: DWORD) -> DWORD { - ((VerTok & D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION_MASK) - >> D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION_SHIFT) + (VerTok & D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION_MASK) + >> D3D10_SB_TOKENIZED_PROGRAM_MAJOR_VERSION_SHIFT } #[inline] pub fn DECODE_D3D10_SB_TOKENIZED_PROGRAM_MINOR_VERSION(VerTok: DWORD) -> DWORD { - (VerTok & D3D10_SB_TOKENIZED_PROGRAM_MINOR_VERSION_MASK) + VerTok & D3D10_SB_TOKENIZED_PROGRAM_MINOR_VERSION_MASK } #[inline] pub fn ENCODE_D3D10_SB_TOKENIZED_PROGRAM_VERSION_TOKEN( @@ -904,14 +904,14 @@ pub fn ENCODE_D3D10_SB_NAME(Name: D3D10_SB_NAME) -> DWORD { Name & D3D10_SB_NAME_MASK } -pub const D3D10_SB_GLOBAL_FLAG_REFACTORING_ALLOWED: DWORD = (1 << 11); -pub const D3D11_SB_GLOBAL_FLAG_ENABLE_DOUBLE_PRECISION_FLOAT_OPS: DWORD = (1 << 12); -pub const D3D11_SB_GLOBAL_FLAG_FORCE_EARLY_DEPTH_STENCIL: DWORD = (1 << 13); -pub const D3D11_SB_GLOBAL_FLAG_ENABLE_RAW_AND_STRUCTURED_BUFFERS: DWORD = (1 << 14); -pub const D3D11_1_SB_GLOBAL_FLAG_SKIP_OPTIMIZATION: DWORD = (1 << 15); -pub const D3D11_1_SB_GLOBAL_FLAG_ENABLE_MINIMUM_PRECISION: DWORD = (1 << 16); -pub const D3D11_1_SB_GLOBAL_FLAG_ENABLE_DOUBLE_EXTENSIONS: DWORD = (1 << 17); -pub const D3D11_1_SB_GLOBAL_FLAG_ENABLE_SHADER_EXTENSIONS: DWORD = (1 << 
18); +pub const D3D10_SB_GLOBAL_FLAG_REFACTORING_ALLOWED: DWORD = 1 << 11; +pub const D3D11_SB_GLOBAL_FLAG_ENABLE_DOUBLE_PRECISION_FLOAT_OPS: DWORD = 1 << 12; +pub const D3D11_SB_GLOBAL_FLAG_FORCE_EARLY_DEPTH_STENCIL: DWORD = 1 << 13; +pub const D3D11_SB_GLOBAL_FLAG_ENABLE_RAW_AND_STRUCTURED_BUFFERS: DWORD = 1 << 14; +pub const D3D11_1_SB_GLOBAL_FLAG_SKIP_OPTIMIZATION: DWORD = 1 << 15; +pub const D3D11_1_SB_GLOBAL_FLAG_ENABLE_MINIMUM_PRECISION: DWORD = 1 << 16; +pub const D3D11_1_SB_GLOBAL_FLAG_ENABLE_DOUBLE_EXTENSIONS: DWORD = 1 << 17; +pub const D3D11_1_SB_GLOBAL_FLAG_ENABLE_SHADER_EXTENSIONS: DWORD = 1 << 18; pub const D3D10_SB_GLOBAL_FLAGS_MASK: DWORD = 0x00fff800; #[inline] pub fn DECODE_D3D10_SB_GLOBAL_FLAGS(OpcodeToken0: DWORD) -> DWORD { diff -Nru cargo-0.44.1/vendor/winapi/src/um/dbghelp.rs cargo-0.47.0/vendor/winapi/src/um/dbghelp.rs --- cargo-0.44.1/vendor/winapi/src/um/dbghelp.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/dbghelp.rs 2020-10-01 21:38:28.000000000 +0000 @@ -7,7 +7,7 @@ use shared::basetsd::{DWORD64, PDWORD64, ULONG64}; use shared::guiddef::GUID; use shared::minwindef::{ - BOOL, DWORD, HMODULE, LPDWORD, PDWORD, PUCHAR, PULONG, UCHAR, ULONG, USHORT, WORD, + BOOL, DWORD, HMODULE, LPDWORD, MAX_PATH, PDWORD, PUCHAR, PULONG, UCHAR, ULONG, USHORT, WORD, }; use um::winnt::{ BOOLEAN, CHAR, HANDLE, LIST_ENTRY, PCSTR, PCWSTR, PIMAGE_NT_HEADERS, PIMAGE_SECTION_HEADER, @@ -372,6 +372,18 @@ Name: [WCHAR; 1], }} pub type PSYMBOL_INFOW = *mut SYMBOL_INFOW; +ENUM!{enum SYM_TYPE { + SymNone = 0, + SymCoff, + SymCv, + SymPdb, + SymExport, + SymDeferred, + SymSym, + SymDia, + SymVirtual, + NumSymTypes, +}} STRUCT!{struct IMAGEHLP_SYMBOL64 { SizeOfStruct: DWORD, Address: DWORD64, @@ -381,6 +393,34 @@ Name: [CHAR; 1], }} pub type PIMAGEHLP_SYMBOL64 = *mut IMAGEHLP_SYMBOL64; +STRUCT!{struct IMAGEHLP_MODULEW64 { + SizeOfStruct: DWORD, + BaseOfImage: DWORD64, + ImageSize: DWORD, + TimeDateStamp: DWORD, + CheckSum: DWORD, + NumSyms: DWORD, + SymType: SYM_TYPE, + ModuleName: [WCHAR; 32], + ImageName: [WCHAR; 256], + LoadedImageName: [WCHAR; 256], + LoadedPdbName: [WCHAR; 256], + CVSig: DWORD, + CVData: [WCHAR; MAX_PATH * 3], + PdbSig: DWORD, + PdbSig70: GUID, + PdbAge: DWORD, + PdbUnmatched: BOOL, + DbgUnmatched: BOOL, + LineNumbers: BOOL, + GlobalSymbols: BOOL, + TypeInfo: BOOL, + SourceIndexed: BOOL, + Publics: BOOL, + MachineType: DWORD, + Reserved: DWORD, +}} +pub type PIMAGEHLP_MODULEW64 = *mut IMAGEHLP_MODULEW64; STRUCT!{struct IMAGEHLP_LINEW64 { SizeOfStruct: DWORD, Key: PVOID, @@ -537,6 +577,44 @@ Rva: ULONG, LastRvaSection: *mut PIMAGE_SECTION_HEADER, ) -> PVOID; +} +pub const SYMOPT_CASE_INSENSITIVE: DWORD = 0x00000001; +pub const SYMOPT_UNDNAME: DWORD = 0x00000002; +pub const SYMOPT_DEFERRED_LOADS: DWORD = 0x00000004; +pub const SYMOPT_NO_CPP: DWORD = 0x00000008; +pub const SYMOPT_LOAD_LINES: DWORD = 0x00000010; +pub const SYMOPT_OMAP_FIND_NEAREST: DWORD = 0x00000020; +pub const SYMOPT_LOAD_ANYTHING: DWORD = 0x00000040; +pub const SYMOPT_IGNORE_CVREC: DWORD = 0x00000080; +pub const SYMOPT_NO_UNQUALIFIED_LOADS: DWORD = 0x00000100; +pub const SYMOPT_FAIL_CRITICAL_ERRORS: DWORD = 0x00000200; +pub const SYMOPT_EXACT_SYMBOLS: DWORD = 0x00000400; +pub const SYMOPT_ALLOW_ABSOLUTE_SYMBOLS: DWORD = 0x00000800; +pub const SYMOPT_IGNORE_NT_SYMPATH: DWORD = 0x00001000; +pub const SYMOPT_INCLUDE_32BIT_MODULES: DWORD = 0x00002000; +pub const SYMOPT_PUBLICS_ONLY: DWORD = 0x00004000; +pub const SYMOPT_NO_PUBLICS: DWORD = 0x00008000; +pub const SYMOPT_AUTO_PUBLICS: 
DWORD = 0x00010000; +pub const SYMOPT_NO_IMAGE_SEARCH: DWORD = 0x00020000; +pub const SYMOPT_SECURE: DWORD = 0x00040000; +pub const SYMOPT_NO_PROMPTS: DWORD = 0x00080000; +pub const SYMOPT_OVERWRITE: DWORD = 0x00100000; +pub const SYMOPT_IGNORE_IMAGEDIR: DWORD = 0x00200000; +pub const SYMOPT_FLAT_DIRECTORY: DWORD = 0x00400000; +pub const SYMOPT_FAVOR_COMPRESSED: DWORD = 0x00800000; +pub const SYMOPT_ALLOW_ZERO_ADDRESS: DWORD = 0x01000000; +pub const SYMOPT_DISABLE_SYMSRV_AUTODETECT: DWORD = 0x02000000; +pub const SYMOPT_READONLY_CACHE: DWORD = 0x04000000; +pub const SYMOPT_SYMPATH_LAST: DWORD = 0x08000000; +pub const SYMOPT_DISABLE_FAST_SYMBOLS: DWORD = 0x10000000; +pub const SYMOPT_DISABLE_SYMSRV_TIMEOUT: DWORD = 0x20000000; +pub const SYMOPT_DISABLE_SRVSTAR_ON_STARTUP: DWORD = 0x40000000; +pub const SYMOPT_DEBUG: DWORD = 0x80000000; +extern "system" { + pub fn SymSetOptions( + SymOptions: DWORD, + ) -> DWORD; + pub fn SymGetOptions() -> DWORD; pub fn SymCleanup( hProcess: HANDLE, ) -> BOOL; @@ -620,6 +698,11 @@ pdwDisplacement: PDWORD, Line: PIMAGEHLP_LINEW64, ) -> BOOL; + pub fn SymGetModuleInfoW64( + hProcess: HANDLE, + qwAddr: DWORD64, + ModuleInfo: PIMAGEHLP_MODULEW64, + ) -> BOOL; pub fn SymGetModuleBase64( hProcess: HANDLE, AddrBase: DWORD64, diff -Nru cargo-0.44.1/vendor/winapi/src/um/dot1x.rs cargo-0.47.0/vendor/winapi/src/um/dot1x.rs --- cargo-0.44.1/vendor/winapi/src/um/dot1x.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/dot1x.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,151 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::guiddef::GUID; +use shared::minwindef::{BOOL, DWORD}; +use um::eaptypes::EAP_METHOD_TYPE; +use um::l2cmn::L2_REASON_CODE_ONEX_BASE; +use um::winnt::HANDLE; +ENUM!{enum ONEX_AUTH_IDENTITY { + OneXAuthIdentityNone = 0, + OneXAuthIdentityMachine = 1, + OneXAuthIdentityUser = 2, + OneXAuthIdentityExplicitUser = 3, + OneXAuthIdentityGuest = 4, + OneXAuthIdentityInvalid = 5, +}} +pub type PONEX_AUTH_IDENTITY = *mut ONEX_AUTH_IDENTITY; +ENUM!{enum ONEX_AUTH_STATUS { + OneXAuthNotStarted = 0, + OneXAuthInProgress = 1, + OneXAuthNoAuthenticatorFound = 2, + OneXAuthSuccess = 3, + OneXAuthFailure = 4, + OneXAuthInvalid = 5, +}} +pub type PONEX_AUTH_STATUS = *mut ONEX_AUTH_STATUS; +ENUM!{enum ONEX_REASON_CODE { + ONEX_REASON_CODE_SUCCESS = 0, + ONEX_REASON_START = L2_REASON_CODE_ONEX_BASE, + ONEX_UNABLE_TO_IDENTIFY_USER = 327681, + ONEX_IDENTITY_NOT_FOUND = 327682, + ONEX_UI_DISABLED = 327683, + ONEX_UI_FAILURE = 327684, + ONEX_EAP_FAILURE_RECEIVED = 327685, + ONEX_AUTHENTICATOR_NO_LONGER_PRESENT = 327686, + ONEX_NO_RESPONSE_TO_IDENTITY = 327687, + ONEX_PROFILE_VERSION_NOT_SUPPORTED = 327688, + ONEX_PROFILE_INVALID_LENGTH = 327689, + ONEX_PROFILE_DISALLOWED_EAP_TYPE = 327690, + ONEX_PROFILE_INVALID_EAP_TYPE_OR_FLAG = 327691, + ONEX_PROFILE_INVALID_ONEX_FLAGS = 327692, + ONEX_PROFILE_INVALID_TIMER_VALUE = 327693, + ONEX_PROFILE_INVALID_SUPPLICANT_MODE = 327694, + ONEX_PROFILE_INVALID_AUTH_MODE = 327695, + ONEX_PROFILE_INVALID_EAP_CONNECTION_PROPERTIES = 327696, + ONEX_UI_CANCELLED = 327697, + ONEX_PROFILE_INVALID_EXPLICIT_CREDENTIALS = 327698, + ONEX_PROFILE_EXPIRED_EXPLICIT_CREDENTIALS = 327699, + ONEX_UI_NOT_PERMITTED = 327700, +}} +pub type PONEX_REASON_CODE = *mut ONEX_REASON_CODE; +ENUM!{enum ONEX_NOTIFICATION_TYPE { + 
OneXPublicNotificationBase = 0, + OneXNotificationTypeResultUpdate = 1, + OneXNotificationTypeAuthRestarted = 2, + OneXNotificationTypeEventInvalid = 3, + OneXNumNotifications = OneXNotificationTypeEventInvalid, +}} +pub type PONEX_NOTIFICATION_TYPE = *mut ONEX_NOTIFICATION_TYPE; +ENUM!{enum ONEX_AUTH_RESTART_REASON { + OneXRestartReasonPeerInitiated = 0, + OneXRestartReasonMsmInitiated = 1, + OneXRestartReasonOneXHeldStateTimeout = 2, + OneXRestartReasonOneXAuthTimeout = 3, + OneXRestartReasonOneXConfigurationChanged = 4, + OneXRestartReasonOneXUserChanged = 5, + OneXRestartReasonQuarantineStateChanged = 6, + OneXRestartReasonAltCredsTrial = 7, + OneXRestartReasonInvalid = 8, +}} +pub type PONEX_AUTH_RESTART_REASON = *mut ONEX_AUTH_RESTART_REASON; +STRUCT!{struct ONEX_VARIABLE_BLOB { + dwSize: DWORD, + dwOffset: DWORD, +}} +pub type PONEX_VARIABLE_BLOB = *mut ONEX_VARIABLE_BLOB; +STRUCT!{struct ONEX_AUTH_PARAMS { + fUpdatePending: BOOL, + oneXConnProfile: ONEX_VARIABLE_BLOB, + authIdentity: ONEX_AUTH_IDENTITY, + dwQuarantineState: DWORD, + Bitfields: DWORD, + dwSessionId: DWORD, + hUserToken: HANDLE, + OneXUserProfile: ONEX_VARIABLE_BLOB, + Identity: ONEX_VARIABLE_BLOB, + UserName: ONEX_VARIABLE_BLOB, + Domain: ONEX_VARIABLE_BLOB, +}} +BITFIELD!{ONEX_AUTH_PARAMS Bitfields: DWORD [ + fSessionId set_fSessionId[0..1], + fhUserToken set_fhUserToken[1..2], + fOnexUserProfile set_fOnexUserProfile[2..3], + fIdentity set_fIdentity[3..4], + fUserName set_fUserName[4..5], + fDomain set_fDomain[5..6], +]} +pub type PONEX_AUTH_PARAMS = *mut ONEX_AUTH_PARAMS; +STRUCT!{struct ONEX_EAP_ERROR { + dwWinError: DWORD, + type_: EAP_METHOD_TYPE, + dwReasonCode: DWORD, + rootCauseGuid: GUID, + repairGuid: GUID, + helpLinkGuid: GUID, + Bitfields: DWORD, + RootCauseString: ONEX_VARIABLE_BLOB, + RepairString: ONEX_VARIABLE_BLOB, +}} +BITFIELD!{ONEX_EAP_ERROR Bitfields: DWORD [ + fRootCauseString set_fRootCauseString[0..1], + fRepairString set_fRepairString[1..2], +]} +pub type PONEX_EAP_ERROR = *mut ONEX_EAP_ERROR; +STRUCT!{struct ONEX_STATUS { + authStatus: ONEX_AUTH_STATUS, + dwReason: DWORD, + dwError: DWORD, +}} +pub type PONEX_STATUS = *mut ONEX_STATUS; +ENUM!{enum ONEX_EAP_METHOD_BACKEND_SUPPORT { + OneXEapMethodBackendSupportUnknown = 0, + OneXEapMethodBackendSupported = 1, + OneXEapMethodBackendUnsupported = 2, +}} +STRUCT!{struct ONEX_RESULT_UPDATE_DATA { + oneXStatus: ONEX_STATUS, + BackendSupport: ONEX_EAP_METHOD_BACKEND_SUPPORT, + fBackendEngaged: BOOL, + Bitfields: DWORD, + authParams: ONEX_VARIABLE_BLOB, + eapError: ONEX_VARIABLE_BLOB, +}} +BITFIELD!{ONEX_RESULT_UPDATE_DATA Bitfields: DWORD [ + fOneXAuthParams set_fOneXAuthParams[0..1], + fEapError set_fEapError[1..2], +]} +pub type PONEX_RESULT_UPDATE_DATA = *mut ONEX_RESULT_UPDATE_DATA; +STRUCT!{struct ONEX_USER_INFO { + authIdentity: ONEX_AUTH_IDENTITY, + Bitfields: DWORD, + UserName: ONEX_VARIABLE_BLOB, + DomainName: ONEX_VARIABLE_BLOB, +}} +BITFIELD!{ONEX_USER_INFO Bitfields: DWORD [ + fUserName set_fUserName[0..1], + fDomainName set_fDomainName[1..2], +]} +pub type PONEX_USER_INFO = *mut ONEX_USER_INFO; diff -Nru cargo-0.44.1/vendor/winapi/src/um/eaptypes.rs cargo-0.47.0/vendor/winapi/src/um/eaptypes.rs --- cargo-0.44.1/vendor/winapi/src/um/eaptypes.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/eaptypes.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,403 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. 
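The 802.1X structures above describe their variable-length members with ONEX_VARIABLE_BLOB values (a size plus an offset) rather than embedding the data. The sketch below is illustrative only: it assumes dwOffset is a byte offset from the start of the buffer that carries the whole structure (the usual Windows convention, not something this patch states), and the blob_bytes helper is invented for the example.

use winapi::um::dot1x::ONEX_VARIABLE_BLOB;

// Hypothetical helper for illustration: resolve a blob against its carrier buffer.
fn blob_bytes<'a>(buffer: &'a [u8], blob: &ONEX_VARIABLE_BLOB) -> Option<&'a [u8]> {
    let start = blob.dwOffset as usize;
    let end = start.checked_add(blob.dwSize as usize)?;
    buffer.get(start..end) // None if the blob runs past the end of the buffer
}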
+// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::guiddef::GUID; +use shared::minwindef::{BOOL, BYTE, DWORD}; +use um::winnt::LPWSTR; +pub const eapPropCipherSuiteNegotiation: DWORD = 0x00000001; +pub const eapPropMutualAuth: DWORD = 0x00000002; +pub const eapPropIntegrity: DWORD = 0x00000004; +pub const eapPropReplayProtection: DWORD = 0x00000008; +pub const eapPropConfidentiality: DWORD = 0x00000010; +pub const eapPropKeyDerivation: DWORD = 0x00000020; +pub const eapPropKeyStrength64: DWORD = 0x00000040; +pub const eapPropKeyStrength128: DWORD = 0x00000080; +pub const eapPropKeyStrength256: DWORD = 0x00000100; +pub const eapPropKeyStrength512: DWORD = 0x00000200; +pub const eapPropKeyStrength1024: DWORD = 0x00000400; +pub const eapPropDictionaryAttackResistance: DWORD = 0x00000800; +pub const eapPropFastReconnect: DWORD = 0x00001000; +pub const eapPropCryptoBinding: DWORD = 0x00002000; +pub const eapPropSessionIndependence: DWORD = 0x00004000; +pub const eapPropFragmentation: DWORD = 0x00008000; +pub const eapPropChannelBinding: DWORD = 0x00010000; +pub const eapPropNap: DWORD = 0x00020000; +pub const eapPropStandalone: DWORD = 0x00040000; +pub const eapPropMppeEncryption: DWORD = 0x00080000; +pub const eapPropTunnelMethod: DWORD = 0x00100000; +pub const eapPropSupportsConfig: DWORD = 0x00200000; +pub const eapPropCertifiedMethod: DWORD = 0x00400000; +pub const eapPropHiddenMethod: DWORD = 0x00800000; +pub const eapPropMachineAuth: DWORD = 0x01000000; +pub const eapPropUserAuth: DWORD = 0x02000000; +pub const eapPropIdentityPrivacy: DWORD = 0x04000000; +pub const eapPropMethodChaining: DWORD = 0x08000000; +pub const eapPropSharedStateEquivalence: DWORD = 0x10000000; +pub const eapPropReserved: DWORD = 0x80000000; +pub const EAP_VALUENAME_PROPERTIES: &'static str = "Properties"; +pub type EAP_SESSIONID = DWORD; +STRUCT!{struct EAP_TYPE { + type_: BYTE, + dwVendorId: DWORD, + dwVendorType: DWORD, +}} +STRUCT!{struct EAP_METHOD_TYPE { + eapType: EAP_TYPE, + dwAuthorId: DWORD, +}} +STRUCT!{struct EAP_METHOD_INFO { + eaptype: EAP_METHOD_TYPE, + pwszAuthorName: LPWSTR, + pwszFriendlyName: LPWSTR, + eapProperties: DWORD, + pInnerMethodInfo: *mut EAP_METHOD_INFO, +}} +STRUCT!{struct EAP_METHOD_INFO_EX { + eaptype: EAP_METHOD_TYPE, + pwszAuthorName: LPWSTR, + pwszFriendlyName: LPWSTR, + eapProperties: DWORD, + pInnerMethodInfoArray: *mut EAP_METHOD_INFO_ARRAY_EX, +}} +STRUCT!{struct EAP_METHOD_INFO_ARRAY { + dwNumberOfMethods: DWORD, + pEapMethods: *mut EAP_METHOD_INFO, +}} +STRUCT!{struct EAP_METHOD_INFO_ARRAY_EX { + dwNumberOfMethods: DWORD, + pEapMethods: *mut EAP_METHOD_INFO_EX, +}} +STRUCT!{struct EAP_ERROR { + dwWinError: DWORD, + type_: EAP_METHOD_TYPE, + dwReasonCode: DWORD, + rootCauseGuid: GUID, + repairGuid: GUID, + helpLinkGuid: GUID, + pRootCauseString: LPWSTR, + pRepairString: LPWSTR, +}} +ENUM!{enum EAP_ATTRIBUTE_TYPE { + eatMinimum = 0, + eatUserName = 1, + eatUserPassword = 2, + eatMD5CHAPPassword = 3, + eatNASIPAddress = 4, + eatNASPort = 5, + eatServiceType = 6, + eatFramedProtocol = 7, + eatFramedIPAddress = 8, + eatFramedIPNetmask = 9, + eatFramedRouting = 10, + eatFilterId = 11, + eatFramedMTU = 12, + eatFramedCompression = 13, + eatLoginIPHost = 14, + eatLoginService = 15, + eatLoginTCPPort = 16, + eatUnassigned17 = 17, + eatReplyMessage = 18, + eatCallbackNumber = 19, + eatCallbackId = 20, + eatUnassigned21 = 21, + eatFramedRoute = 22, + eatFramedIPXNetwork = 23, + eatState = 24, 
+ eatClass = 25, + eatVendorSpecific = 26, + eatSessionTimeout = 27, + eatIdleTimeout = 28, + eatTerminationAction = 29, + eatCalledStationId = 30, + eatCallingStationId = 31, + eatNASIdentifier = 32, + eatProxyState = 33, + eatLoginLATService = 34, + eatLoginLATNode = 35, + eatLoginLATGroup = 36, + eatFramedAppleTalkLink = 37, + eatFramedAppleTalkNetwork = 38, + eatFramedAppleTalkZone = 39, + eatAcctStatusType = 40, + eatAcctDelayTime = 41, + eatAcctInputOctets = 42, + eatAcctOutputOctets = 43, + eatAcctSessionId = 44, + eatAcctAuthentic = 45, + eatAcctSessionTime = 46, + eatAcctInputPackets = 47, + eatAcctOutputPackets = 48, + eatAcctTerminateCause = 49, + eatAcctMultiSessionId = 50, + eatAcctLinkCount = 51, + eatAcctEventTimeStamp = 55, + eatMD5CHAPChallenge = 60, + eatNASPortType = 61, + eatPortLimit = 62, + eatLoginLATPort = 63, + eatTunnelType = 64, + eatTunnelMediumType = 65, + eatTunnelClientEndpoint = 66, + eatTunnelServerEndpoint = 67, + eatARAPPassword = 70, + eatARAPFeatures = 71, + eatARAPZoneAccess = 72, + eatARAPSecurity = 73, + eatARAPSecurityData = 74, + eatPasswordRetry = 75, + eatPrompt = 76, + eatConnectInfo = 77, + eatConfigurationToken = 78, + eatEAPMessage = 79, + eatSignature = 80, + eatARAPChallengeResponse = 84, + eatAcctInterimInterval = 85, + eatNASIPv6Address = 95, + eatFramedInterfaceId = 96, + eatFramedIPv6Prefix = 97, + eatLoginIPv6Host = 98, + eatFramedIPv6Route = 99, + eatFramedIPv6Pool = 100, + eatARAPGuestLogon = 8096, + eatCertificateOID = 8097, + eatEAPConfiguration = 8098, + eatPEAPEmbeddedEAPTypeId = 8099, + eatPEAPFastRoamedSession = 8100, + eatFastRoamedSession = 8100, + eatEAPTLV = 8102, + eatCredentialsChanged = 8103, + eatInnerEapMethodType = 8104, + eatClearTextPassword = 8107, + eatQuarantineSoH = 8150, + eatCertificateThumbprint = 8250, + eatPeerId = 9000, + eatServerId = 9001, + eatMethodId = 9002, + eatEMSK = 9003, + eatSessionId = 9004, + eatReserved = 0xFFFFFFFF, +}} +pub type EapAttributeType = EAP_ATTRIBUTE_TYPE; +STRUCT!{struct EAP_ATTRIBUTE { + eaType: EAP_ATTRIBUTE_TYPE, + dwLength: DWORD, + pValue: *mut BYTE, +}} +pub type EapAttribute = EAP_ATTRIBUTE; +STRUCT!{struct EAP_ATTRIBUTES { + dwNumberOfAttributes: DWORD, + pAttribs: *mut EAP_ATTRIBUTE, +}} +pub type EapAttributes = EAP_ATTRIBUTES; +pub const EAP_FLAG_Reserved1: DWORD = 0x00000001; +pub const EAP_FLAG_NON_INTERACTIVE: DWORD = 0x00000002; +pub const EAP_FLAG_LOGON: DWORD = 0x00000004; +pub const EAP_FLAG_PREVIEW: DWORD = 0x00000008; +pub const EAP_FLAG_Reserved2: DWORD = 0x00000010; +pub const EAP_FLAG_MACHINE_AUTH: DWORD = 0x00000020; +pub const EAP_FLAG_GUEST_ACCESS: DWORD = 0x00000040; +pub const EAP_FLAG_Reserved3: DWORD = 0x00000080; +pub const EAP_FLAG_Reserved4: DWORD = 0x00000100; +pub const EAP_FLAG_RESUME_FROM_HIBERNATE: DWORD = 0x00000200; +pub const EAP_FLAG_Reserved5: DWORD = 0x00000400; +pub const EAP_FLAG_Reserved6: DWORD = 0x00000800; +pub const EAP_FLAG_FULL_AUTH: DWORD = 0x00001000; +pub const EAP_FLAG_PREFER_ALT_CREDENTIALS: DWORD = 0x00002000; +pub const EAP_FLAG_Reserved7: DWORD = 0x00004000; +pub const EAP_PEER_FLAG_HEALTH_STATE_CHANGE: DWORD = 0x00008000; +pub const EAP_FLAG_SUPRESS_UI: DWORD = 0x00010000; +pub const EAP_FLAG_PRE_LOGON: DWORD = 0x00020000; +pub const EAP_FLAG_USER_AUTH: DWORD = 0x00040000; +pub const EAP_FLAG_CONFG_READONLY: DWORD = 0x00080000; +pub const EAP_FLAG_Reserved8: DWORD = 0x00100000; +pub const EAP_FLAG_Reserved9: DWORD = 0x00400000; +pub const EAP_FLAG_VPN: DWORD = 0x00800000; +pub const 
EAP_CONFIG_INPUT_FIELD_PROPS_DEFAULT: DWORD = 0x00000000; +pub const EAP_CONFIG_INPUT_FIELD_PROPS_NON_DISPLAYABLE: DWORD = 0x00000001; +pub const EAP_CONFIG_INPUT_FIELD_PROPS_NON_PERSIST: DWORD = 0x00000002; +pub const EAP_UI_INPUT_FIELD_PROPS_DEFAULT: DWORD = EAP_CONFIG_INPUT_FIELD_PROPS_DEFAULT; +pub const EAP_UI_INPUT_FIELD_PROPS_NON_DISPLAYABLE: DWORD = + EAP_CONFIG_INPUT_FIELD_PROPS_NON_DISPLAYABLE; +pub const EAP_UI_INPUT_FIELD_PROPS_NON_PERSIST: DWORD = 0x00000002; +pub const EAP_UI_INPUT_FIELD_PROPS_READ_ONLY: DWORD = 0x00000004; +ENUM!{enum EAP_CONFIG_INPUT_FIELD_TYPE { + EapConfigInputUsername = 0, + EapConfigInputPassword = 1, + EapConfigInputNetworkUsername = 2, + EapConfigInputNetworkPassword = 3, + EapConfigInputPin = 4, + EapConfigInputPSK = 5, + EapConfigInputEdit = 6, + EapConfigSmartCardUsername = 7, + EapConfigSmartCardError = 8, +}} +pub type PEAP_CONFIG_INPUT_FIELD_TYPE = *mut EAP_CONFIG_INPUT_FIELD_TYPE; +pub const EAP_CREDENTIAL_VERSION: i32 = 1; +pub const EAP_INTERACTIVE_UI_DATA_VERSION: i32 = 1; +pub const EAPHOST_PEER_API_VERSION: i32 = 1; +pub const EAPHOST_METHOD_API_VERSION: i32 = 1; +pub const MAX_EAP_CONFIG_INPUT_FIELD_LENGTH: i32 = 256; +pub const MAX_EAP_CONFIG_INPUT_FIELD_VALUE_LENGTH: i32 = 1024; +STRUCT!{struct EAP_CONFIG_INPUT_FIELD_DATA { + dwSize: DWORD, + Type: EAP_CONFIG_INPUT_FIELD_TYPE, + dwFlagProps: DWORD, + pwszLabel: LPWSTR, + pwszData: LPWSTR, + dwMinDataLength: DWORD, + dwMaxDataLength: DWORD, +}} +pub type PEAP_CONFIG_INPUT_FIELD_DATA = *mut EAP_CONFIG_INPUT_FIELD_DATA; +STRUCT!{struct EAP_CONFIG_INPUT_FIELD_ARRAY { + dwVersion: DWORD, + dwNumberOfFields: DWORD, + pFields: *mut EAP_CONFIG_INPUT_FIELD_DATA, +}} +pub type PEAP_CONFIG_INPUT_FIELD_ARRAY = *mut EAP_CONFIG_INPUT_FIELD_ARRAY; +ENUM!{enum EAP_INTERACTIVE_UI_DATA_TYPE { + EapCredReq = 0, + EapCredResp = 1, + EapCredExpiryReq = 2, + EapCredExpiryResp = 3, + EapCredLogonReq = 4, + EapCredLogonResp = 5, +}} +pub type EAP_CRED_REQ = EAP_CONFIG_INPUT_FIELD_ARRAY; +pub type EAP_CRED_RESP = EAP_CONFIG_INPUT_FIELD_ARRAY; +pub type EAP_CRED_LOGON_REQ = EAP_CONFIG_INPUT_FIELD_ARRAY; +pub type EAP_CRED_LOGON_RESP = EAP_CONFIG_INPUT_FIELD_ARRAY; +STRUCT!{struct EAP_CRED_EXPIRY_REQ { + curCreds: EAP_CONFIG_INPUT_FIELD_ARRAY, + newCreds: EAP_CONFIG_INPUT_FIELD_ARRAY, +}} +pub type EAP_CRED_EXPIRY_RESP = EAP_CRED_EXPIRY_REQ; +UNION!{union EAP_UI_DATA_FORMAT { + [usize; 1], + credData credData_mut: *mut EAP_CRED_REQ, + credExpiryData credExpiryData_mut: *mut EAP_CRED_EXPIRY_REQ, + credLogonData credLogonData_mut: *mut EAP_CRED_LOGON_REQ, +}} +STRUCT!{struct EAP_INTERACTIVE_UI_DATA { + dwVersion: DWORD, + dwSize: DWORD, + dwDataType: EAP_INTERACTIVE_UI_DATA_TYPE, + cbUiData: DWORD, + pbUiData: EAP_UI_DATA_FORMAT, +}} +ENUM!{enum EAP_METHOD_PROPERTY_TYPE { + emptPropCipherSuiteNegotiation = 0, + emptPropMutualAuth = 1, + emptPropIntegrity = 2, + emptPropReplayProtection = 3, + emptPropConfidentiality = 4, + emptPropKeyDerivation = 5, + emptPropKeyStrength64 = 6, + emptPropKeyStrength128 = 7, + emptPropKeyStrength256 = 8, + emptPropKeyStrength512 = 9, + emptPropKeyStrength1024 = 10, + emptPropDictionaryAttackResistance = 11, + emptPropFastReconnect = 12, + emptPropCryptoBinding = 13, + emptPropSessionIndependence = 14, + emptPropFragmentation = 15, + emptPropChannelBinding = 16, + emptPropNap = 17, + emptPropStandalone = 18, + emptPropMppeEncryption = 19, + emptPropTunnelMethod = 20, + emptPropSupportsConfig = 21, + emptPropCertifiedMethod = 22, + emptPropHiddenMethod = 23, + emptPropMachineAuth = 
24, + emptPropUserAuth = 25, + emptPropIdentityPrivacy = 26, + emptPropMethodChaining = 27, + emptPropSharedStateEquivalence = 28, + emptLegacyMethodPropertyFlag = 31, + emptPropVendorSpecific = 255, +}} +ENUM!{enum EAP_METHOD_PROPERTY_VALUE_TYPE { + empvtBool = 0, + empvtDword = 1, + empvtString = 2, +}} +STRUCT!{struct EAP_METHOD_PROPERTY_VALUE_BOOL { + length: DWORD, + value: BOOL, +}} +STRUCT!{struct EAP_METHOD_PROPERTY_VALUE_DWORD { + length: DWORD, + value: DWORD, +}} +STRUCT!{struct EAP_METHOD_PROPERTY_VALUE_STRING { + length: DWORD, + value: *mut BYTE, +}} +UNION!{union EAP_METHOD_PROPERTY_VALUE { + [usize; 2], + empvBool empvBool_mut: EAP_METHOD_PROPERTY_VALUE_BOOL, + empvDword empvDword_mut: EAP_METHOD_PROPERTY_VALUE_DWORD, + empvString empvString_mut: EAP_METHOD_PROPERTY_VALUE_STRING, +}} +STRUCT!{struct EAP_METHOD_PROPERTY { + eapMethodPropertyType: EAP_METHOD_PROPERTY_TYPE, + eapMethodPropertyValueType: EAP_METHOD_PROPERTY_VALUE_TYPE, + eapMethodPropertyValue: EAP_METHOD_PROPERTY_VALUE, +}} +STRUCT!{struct EAP_METHOD_PROPERTY_ARRAY { + dwNumberOfProperties: DWORD, + pMethodProperty: *mut EAP_METHOD_PROPERTY, +}} +STRUCT!{struct EAPHOST_IDENTITY_UI_PARAMS { + eapMethodType: EAP_METHOD_TYPE, + dwFlags: DWORD, + dwSizeofConnectionData: DWORD, + pConnectionData: *mut BYTE, + dwSizeofUserData: DWORD, + pUserData: *mut BYTE, + dwSizeofUserDataOut: DWORD, + pUserDataOut: *mut BYTE, + pwszIdentity: LPWSTR, + dwError: DWORD, + pEapError: *mut EAP_ERROR, +}} +STRUCT!{struct EAPHOST_INTERACTIVE_UI_PARAMS { + dwSizeofContextData: DWORD, + pContextData: *mut BYTE, + dwSizeofInteractiveUIData: DWORD, + pInteractiveUIData: *mut BYTE, + dwError: DWORD, + pEapError: *mut EAP_ERROR, +}} +ENUM!{enum EapCredentialType { + EAP_EMPTY_CREDENTIAL = 0, + EAP_USERNAME_PASSWORD_CREDENTIAL = 1, + EAP_WINLOGON_CREDENTIAL = 2, + EAP_CERTIFICATE_CREDENTIAL = 3, + EAP_SIM_CREDENTIAL = 4, +}} +STRUCT!{struct EapUsernamePasswordCredential { + username: LPWSTR, + password: LPWSTR, +}} +pub const CERTIFICATE_HASH_LENGTH: usize = 20; +STRUCT!{struct EapCertificateCredential { + certHash: [BYTE; CERTIFICATE_HASH_LENGTH], + password: LPWSTR, +}} +STRUCT!{struct EapSimCredential { + iccID: LPWSTR, +}} +UNION!{union EapCredentialTypeData { + [u32; 6] [u64; 4], + username_password username_password_mut: EapUsernamePasswordCredential, + certificate certificate_mut: EapCertificateCredential, + sim sim_mut: EapSimCredential, +}} +STRUCT!{struct EapCredential { + credType: EapCredentialType, + credData: EapCredentialTypeData, +}} diff -Nru cargo-0.44.1/vendor/winapi/src/um/http.rs cargo-0.47.0/vendor/winapi/src/um/http.rs --- cargo-0.44.1/vendor/winapi/src/um/http.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/http.rs 2020-10-01 21:38:28.000000000 +0000 @@ -840,6 +840,15 @@ ParamDesc: HTTP_SERVICE_CONFIG_CACHE_PARAM, }} pub type PHTTP_SERVICE_CONFIG_CACHE_SET = *mut HTTP_SERVICE_CONFIG_CACHE_SET; +pub const HTTP_NULL_ID: ULONGLONG = 0; +#[inline] +pub unsafe fn HTTP_IS_NULL_ID(pid: PHTTP_OPAQUE_ID) -> bool { + HTTP_NULL_ID == *pid +} +#[inline] +pub unsafe fn HTTP_SET_NULL_ID(pid: PHTTP_OPAQUE_ID) { + *pid = HTTP_NULL_ID +} extern "system" { pub fn HttpInitialize( Version: HTTPAPI_VERSION, @@ -851,7 +860,7 @@ pReserved: PVOID, ) -> ULONG; pub fn HttpCreateHttpHandle( - pReqQueueHandle: HANDLE, + pReqQueueHandle: PHANDLE, Reserved: ULONG, ) -> ULONG; pub fn HttpCreateRequestQueue( @@ -1069,4 +1078,19 @@ pReturnLength: PULONG, pOverlapped: LPOVERLAPPED, ) -> ULONG; + pub fn HttpDeclarePush( + 
RequestQueueHandle: HANDLE, + RequestId: HTTP_REQUEST_ID, + Verb: HTTP_VERB, + Path: PCWSTR, + Query: PCSTR, + Headers: PHTTP_REQUEST_HEADERS, + ) -> ULONG; + pub fn HttpUpdateServiceConfiguration( + Handle: HANDLE, + ConfigId: HTTP_SERVICE_CONFIG_ID, + ConfigInfo: PVOID, + ConfigInfoLength: ULONG, + Overlapped: LPOVERLAPPED, + ) -> ULONG; } diff -Nru cargo-0.44.1/vendor/winapi/src/um/ipexport.rs cargo-0.47.0/vendor/winapi/src/um/ipexport.rs --- cargo-0.44.1/vendor/winapi/src/um/ipexport.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/ipexport.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,174 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +// #include +// #include +// #include +use shared::basetsd::ULONG64; +use shared::in6addr::in6_addr; +use shared::ntdef::{INT, PUCHAR, PVOID, UCHAR, ULONG, USHORT, WCHAR}; +pub const MAX_ADAPTER_NAME: usize = 128; +pub const MAX_OPT_SIZE: usize = 40; +pub type IPAddr = ULONG; +pub type IPMask = ULONG; +pub type IP_STATUS = ULONG; +pub type IPv6Addr = in6_addr; +STRUCT!{struct IP_OPTION_INFORMATION { + Ttl: UCHAR, + Tos: UCHAR, + Flags: UCHAR, + OptionsSize: UCHAR, + OptionsData: PUCHAR, +}} +pub type PIP_OPTION_INFORMATION = *mut IP_OPTION_INFORMATION; +#[cfg(target_arch = "x86_64")] +STRUCT!{struct IP_OPTION_INFORMATION32 { + Ttl: UCHAR, + Tos: UCHAR, + Flags: UCHAR, + OptionsSize: UCHAR, + OptionsData: u32, // UCHAR * POINTER_32 +}} +#[cfg(target_arch = "x86_64")] +pub type PIP_OPTION_INFORMATION32 = *mut IP_OPTION_INFORMATION32; +STRUCT!{struct ICMP_ECHO_REPLY { + Address: IPAddr, + Status: ULONG, + RoundTripTime: ULONG, + DataSize: USHORT, + Reserved: USHORT, + Data: PVOID, + Options: IP_OPTION_INFORMATION, +}} +pub type PICMP_ECHO_REPLY = *mut ICMP_ECHO_REPLY; +#[cfg(target_arch = "x86_64")] +STRUCT!{struct ICMP_ECHO_REPLY32 { + Address: IPAddr, + Status: ULONG, + RoundTripTime: ULONG, + DataSize: USHORT, + Reserved: USHORT, + Data: u32, // VOID * POINTER_32 + Options: IP_OPTION_INFORMATION32, +}} +#[cfg(target_arch = "x86_64")] +pub type PICMP_ECHO_REPLY32 = *mut ICMP_ECHO_REPLY32; +STRUCT!{#[repr(packed)] struct IPV6_ADDRESS_EX { + sin6_port: USHORT, + sin6_flowinfo: ULONG, + sin6_addr: [USHORT; 8], + sin6_scope_id: ULONG, +}} +pub type PIPV6_ADDRESS_EX = *mut IPV6_ADDRESS_EX; +// #include +STRUCT!{struct ICMPV6_ECHO_REPLY_LH { + Address: IPV6_ADDRESS_EX, + Status: ULONG, + RoundTripTime: INT, +}} +pub type PICMPV6_ECHO_REPLY_LH = *mut ICMPV6_ECHO_REPLY_LH; +pub type ICMPV6_ECHO_REPLY = ICMPV6_ECHO_REPLY_LH; +pub type PICMPV6_ECHO_REPLY = *mut ICMPV6_ECHO_REPLY; +// #endif +STRUCT!{struct ARP_SEND_REPLY { + DestAddress: IPAddr, + SrcAddress: IPAddr, +}} +pub type PARP_SEND_REPLY = *mut ARP_SEND_REPLY; +STRUCT!{struct TCP_RESERVE_PORT_RANGE { + UpperRange: USHORT, + LowerRange: USHORT, +}} +pub type PTCP_RESERVE_PORT_RANGE = *mut TCP_RESERVE_PORT_RANGE; +STRUCT!{struct IP_ADAPTER_INDEX_MAP { + Index: ULONG, + Name: [WCHAR; MAX_ADAPTER_NAME], +}} +pub type PIP_ADAPTER_INDEX_MAP = *mut IP_ADAPTER_INDEX_MAP; +STRUCT!{struct IP_INTERFACE_INFO { + NumAdapters: ULONG, + Adapter: [IP_ADAPTER_INDEX_MAP; 1], +}} +pub type PIP_INTERFACE_INFO = *mut IP_INTERFACE_INFO; +STRUCT!{struct IP_UNIDIRECTIONAL_ADAPTER_ADDRESS { + NumAdapters: ULONG, + Address: [IPAddr; 1], +}} +pub type PIP_UNIDIRECTIONAL_ADAPTER_ADDRESS = *mut 
IP_UNIDIRECTIONAL_ADAPTER_ADDRESS; +STRUCT!{struct IP_ADAPTER_ORDER_MAP { + NumAdapters: ULONG, + AdapterOrder: [ULONG; 1], +}} +pub type PIP_ADAPTER_ORDER_MAP = *mut IP_ADAPTER_ORDER_MAP; +STRUCT!{struct IP_MCAST_COUNTER_INFO { + InMcastOctets: ULONG64, + OutMcastOctets: ULONG64, + InMcastPkts: ULONG64, + OutMcastPkts: ULONG64, +}} +pub type PIP_MCAST_COUNTER_INFO = *mut IP_MCAST_COUNTER_INFO; +// IP_STATUS codes returned from IP APIs +pub const IP_STATUS_BASE: IP_STATUS = 11000; +pub const IP_SUCCESS: IP_STATUS = 0; +pub const IP_BUF_TOO_SMALL: IP_STATUS = IP_STATUS_BASE + 1; +pub const IP_DEST_NET_UNREACHABLE: IP_STATUS = IP_STATUS_BASE + 2; +pub const IP_DEST_HOST_UNREACHABLE: IP_STATUS = IP_STATUS_BASE + 3; +pub const IP_DEST_PROT_UNREACHABLE: IP_STATUS = IP_STATUS_BASE + 4; +pub const IP_DEST_PORT_UNREACHABLE: IP_STATUS = IP_STATUS_BASE + 5; +pub const IP_NO_RESOURCES: IP_STATUS = IP_STATUS_BASE + 6; +pub const IP_BAD_OPTION: IP_STATUS = IP_STATUS_BASE + 7; +pub const IP_HW_ERROR: IP_STATUS = IP_STATUS_BASE + 8; +pub const IP_PACKET_TOO_BIG: IP_STATUS = IP_STATUS_BASE + 9; +pub const IP_REQ_TIMED_OUT: IP_STATUS = IP_STATUS_BASE + 10; +pub const IP_BAD_REQ: IP_STATUS = IP_STATUS_BASE + 11; +pub const IP_BAD_ROUTE: IP_STATUS = IP_STATUS_BASE + 12; +pub const IP_TTL_EXPIRED_TRANSIT: IP_STATUS = IP_STATUS_BASE + 13; +pub const IP_TTL_EXPIRED_REASSEM: IP_STATUS = IP_STATUS_BASE + 14; +pub const IP_PARAM_PROBLEM: IP_STATUS = IP_STATUS_BASE + 15; +pub const IP_SOURCE_QUENCH: IP_STATUS = IP_STATUS_BASE + 16; +pub const IP_OPTION_TOO_BIG: IP_STATUS = IP_STATUS_BASE + 17; +pub const IP_BAD_DESTINATION: IP_STATUS = IP_STATUS_BASE + 18; +pub const IP_DEST_NO_ROUTE: IP_STATUS = IP_STATUS_BASE + 2; +pub const IP_DEST_ADDR_UNREACHABLE: IP_STATUS = IP_STATUS_BASE + 3; +pub const IP_DEST_PROHIBITED: IP_STATUS = IP_STATUS_BASE + 4; +pub const IP_HOP_LIMIT_EXCEEDED: IP_STATUS = IP_STATUS_BASE + 13; +pub const IP_REASSEMBLY_TIME_EXCEEDED: IP_STATUS = IP_STATUS_BASE + 14; +pub const IP_PARAMETER_PROBLEM: IP_STATUS = IP_STATUS_BASE + 15; +pub const IP_DEST_UNREACHABLE: IP_STATUS = IP_STATUS_BASE + 40; +pub const IP_TIME_EXCEEDED: IP_STATUS = IP_STATUS_BASE + 41; +pub const IP_BAD_HEADER: IP_STATUS = IP_STATUS_BASE + 42; +pub const IP_UNRECOGNIZED_NEXT_HEADER: IP_STATUS = IP_STATUS_BASE + 43; +pub const IP_ICMP_ERROR: IP_STATUS = IP_STATUS_BASE + 44; +pub const IP_DEST_SCOPE_MISMATCH: IP_STATUS = IP_STATUS_BASE + 45; +pub const IP_ADDR_DELETED: IP_STATUS = IP_STATUS_BASE + 19; +pub const IP_SPEC_MTU_CHANGE: IP_STATUS = IP_STATUS_BASE + 20; +pub const IP_MTU_CHANGE: IP_STATUS = IP_STATUS_BASE + 21; +pub const IP_UNLOAD: IP_STATUS = IP_STATUS_BASE + 22; +pub const IP_ADDR_ADDED: IP_STATUS = IP_STATUS_BASE + 23; +pub const IP_MEDIA_CONNECT: IP_STATUS = IP_STATUS_BASE + 24; +pub const IP_MEDIA_DISCONNECT: IP_STATUS = IP_STATUS_BASE + 25; +pub const IP_BIND_ADAPTER: IP_STATUS = IP_STATUS_BASE + 26; +pub const IP_UNBIND_ADAPTER: IP_STATUS = IP_STATUS_BASE + 27; +pub const IP_DEVICE_DOES_NOT_EXIST: IP_STATUS = IP_STATUS_BASE + 28; +pub const IP_DUPLICATE_ADDRESS: IP_STATUS = IP_STATUS_BASE + 29; +pub const IP_INTERFACE_METRIC_CHANGE: IP_STATUS = IP_STATUS_BASE + 30; +pub const IP_RECONFIG_SECFLTR: IP_STATUS = IP_STATUS_BASE + 31; +pub const IP_NEGOTIATING_IPSEC: IP_STATUS = IP_STATUS_BASE + 32; +pub const IP_INTERFACE_WOL_CAPABILITY_CHANGE: IP_STATUS = IP_STATUS_BASE + 33; +pub const IP_DUPLICATE_IPADD: IP_STATUS = IP_STATUS_BASE + 34; +pub const IP_GENERAL_FAILURE: IP_STATUS = IP_STATUS_BASE + 50; +pub const 
MAX_IP_STATUS: IP_STATUS = IP_GENERAL_FAILURE; +pub const IP_PENDING: IP_STATUS = IP_STATUS_BASE + 255; +pub const IP_FLAG_REVERSE: UCHAR = 0x1; +pub const IP_FLAG_DF: UCHAR = 0x2; +pub const IP_OPT_EOL: u8 = 0; +pub const IP_OPT_NOP: u8 = 1; +pub const IP_OPT_SECURITY: u8 = 0x82; +pub const IP_OPT_LSRR: u8 = 0x83; +pub const IP_OPT_SSRR: u8 = 0x89; +pub const IP_OPT_RR: u8 = 0x7; +pub const IP_OPT_TS: u8 = 0x44; +pub const IP_OPT_SID: u8 = 0x88; +pub const IP_OPT_ROUTER_ALERT: u8 = 0x94; diff -Nru cargo-0.44.1/vendor/winapi/src/um/iphlpapi.rs cargo-0.47.0/vendor/winapi/src/um/iphlpapi.rs --- cargo-0.44.1/vendor/winapi/src/um/iphlpapi.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/iphlpapi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,521 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +// #include +// #include +// #include +// #include +use shared::basetsd::{PULONG64, ULONG64}; +use shared::ifdef::NET_LUID; +use shared::ifmib::{PMIB_IFROW, PMIB_IFTABLE}; +use shared::ipmib::{ + PMIB_ICMP, PMIB_ICMP_EX, PMIB_IPADDRTABLE, PMIB_IPFORWARDROW, PMIB_IPFORWARDTABLE, + PMIB_IPNETROW, PMIB_IPNETTABLE, PMIB_IPSTATS +}; +use shared::iprtrmib::{TCPIP_OWNER_MODULE_INFO_CLASS, TCP_TABLE_CLASS, UDP_TABLE_CLASS}; +use shared::minwindef::{BOOL, BYTE, DWORD, LPDWORD, PDWORD, PUCHAR, PULONG, UINT}; +use shared::ntdef::{ + BOOLEAN, HANDLE, LPWSTR, PHANDLE, PVOID, PWSTR, ULONG, ULONGLONG, USHORT, WCHAR, +}; +use shared::tcpestats::TCP_ESTATS_TYPE; +use shared::tcpmib::{ + PMIB_TCP6ROW, PMIB_TCP6ROW_OWNER_MODULE, PMIB_TCP6TABLE, PMIB_TCP6TABLE2, PMIB_TCPROW, + PMIB_TCPROW_OWNER_MODULE, PMIB_TCPSTATS, PMIB_TCPSTATS2, PMIB_TCPTABLE, PMIB_TCPTABLE2 +}; +use shared::udpmib::{ + PMIB_UDP6ROW_OWNER_MODULE, PMIB_UDP6TABLE, PMIB_UDPROW_OWNER_MODULE, PMIB_UDPSTATS, + PMIB_UDPSTATS2, PMIB_UDPTABLE +}; +use shared::ws2def::{PSOCKADDR, SOCKADDR, SOCKADDR_IN}; +use shared::ws2ipdef::SOCKADDR_IN6; +use um::ipexport::{ + IPAddr, IPMask, IP_STATUS, PIP_ADAPTER_INDEX_MAP, PIP_ADAPTER_ORDER_MAP, PIP_INTERFACE_INFO, + PIP_UNIDIRECTIONAL_ADAPTER_ADDRESS, +}; +use um::iptypes::{ + PFIXED_INFO, PIP_ADAPTER_ADDRESSES, PIP_ADAPTER_INFO, PIP_INTERFACE_NAME_INFO, + PIP_PER_ADAPTER_INFO, +}; +use um::minwinbase::{LPOVERLAPPED,OVERLAPPED}; +extern "system" { + pub fn GetNumberOfInterfaces( + pdwNumIf: PDWORD + ) -> DWORD; + pub fn GetIfEntry( + pIfRow: PMIB_IFROW, + ) -> DWORD; + pub fn GetIfTable( + pIfTable: PMIB_IFTABLE, + pdwSize: PULONG, + bOrder: BOOL, + ) -> DWORD; + pub fn GetIpAddrTable( + pIpAddrTable: PMIB_IPADDRTABLE, + pdwSize: PULONG, + bOrder: BOOL, + ) -> DWORD; + pub fn GetIpNetTable( + IpNetTable: PMIB_IPNETTABLE, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + pub fn GetIpForwardTable( + pIpForwardTable: PMIB_IPFORWARDTABLE, + pdwSize: PULONG, + bOrder: BOOL, + ) -> DWORD; + pub fn GetTcpTable( + TcpTable: PMIB_TCPTABLE, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365928(v=vs.85).aspx + pub fn GetExtendedTcpTable( + pTcpTable: PVOID, + pdwSize: PDWORD, + bOrder: BOOL, + ulAf: ULONG, + TableClass: TCP_TABLE_CLASS, + Reserved: ULONG, + ) -> DWORD; + pub fn GetOwnerModuleFromTcpEntry( + pTcpEntry: PMIB_TCPROW_OWNER_MODULE, + Class: TCPIP_OWNER_MODULE_INFO_CLASS, + pBuffer: PVOID, + pdwSize: PDWORD, + ) -> DWORD; + pub fn GetUdpTable( 
+ UdpTable: PMIB_UDPTABLE, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + pub fn GetExtendedUdpTable( + pUdpTable: PVOID, + pdwSize: PDWORD, + bOrder: BOOL, + ulAf: ULONG, + TableClass: UDP_TABLE_CLASS, + Reserved: ULONG, + ) -> DWORD; + pub fn GetOwnerModuleFromUdpEntry( + pUdpEntry: PMIB_UDPROW_OWNER_MODULE, + Class: TCPIP_OWNER_MODULE_INFO_CLASS, + pBuffer: PVOID, + pdwSize: PDWORD, + ) -> DWORD; + pub fn GetTcpTable2( + TcpTable: PMIB_TCPTABLE2, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + // Deprecated APIs, Added for documentation. + // pub fn AllocateAndGetTcpExTableFromStack() -> DWORD; + // pub fn AllocateAndGetUdpExTableFromStack() -> DWORD; + pub fn GetTcp6Table( + TcpTable: PMIB_TCP6TABLE, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + pub fn GetTcp6Table2( + TcpTable: PMIB_TCP6TABLE2, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + pub fn GetPerTcpConnectionEStats( + Row: PMIB_TCPROW, + EstatsType: TCP_ESTATS_TYPE, + Rw: PUCHAR, + RwVersion: ULONG, + RwSize: ULONG, + Ros: PUCHAR, + RosVersion: ULONG, + RosSize: ULONG, + Rod: PUCHAR, + RodVersion: ULONG, + RodSize: ULONG, + ) -> ULONG; + pub fn SetPerTcpConnectionEStats( + Row: PMIB_TCPROW, + EstatsType: TCP_ESTATS_TYPE, + Rw: PUCHAR, + RwVersion: ULONG, + RwSize: ULONG, + Offset: ULONG, + ) -> ULONG; + pub fn GetPerTcp6ConnectionEStats( + Row: PMIB_TCP6ROW, + EstatsType: TCP_ESTATS_TYPE, + Rw: PUCHAR, + RwVersion: ULONG, + RwSize: ULONG, + Ros: PUCHAR, + RosVersion: ULONG, + RosSize: ULONG, + Rod: PUCHAR, + RodVersion: ULONG, + RodSize: ULONG, + ) -> ULONG; + pub fn SetPerTcp6ConnectionEStats( + Row: PMIB_TCP6ROW, + EstatsType: TCP_ESTATS_TYPE, + Rw: PUCHAR, + RwVersion: ULONG, + RwSize: ULONG, + Offset: ULONG, + ) -> ULONG; + pub fn GetOwnerModuleFromTcp6Entry( + pTcpEntry: PMIB_TCP6ROW_OWNER_MODULE, + Class: TCPIP_OWNER_MODULE_INFO_CLASS, + pBuffer: PVOID, + pdwSize: PDWORD, + ) -> DWORD; + pub fn GetUdp6Table( + Udp6Table: PMIB_UDP6TABLE, + SizePointer: PULONG, + Order: BOOL, + ) -> ULONG; + pub fn GetOwnerModuleFromUdp6Entry( + pUdpEntry: PMIB_UDP6ROW_OWNER_MODULE, + Class: TCPIP_OWNER_MODULE_INFO_CLASS, + pBuffer: PVOID, + pdwSize: PDWORD, + ) -> DWORD; + pub fn GetOwnerModuleFromPidAndInfo( + ulPid: ULONG, + pInfo: *mut ULONGLONG, + Class: TCPIP_OWNER_MODULE_INFO_CLASS, + pBuffer: PVOID, + pdwSize: PDWORD, + ) -> DWORD; + pub fn GetIpStatistics( + Statistics: PMIB_IPSTATS, + ) -> ULONG; + pub fn GetIcmpStatistics( + Statistics: PMIB_ICMP, + ) -> ULONG; + pub fn GetTcpStatistics( + Statistics: PMIB_TCPSTATS, + ) -> ULONG; + pub fn GetUdpStatistics( + Stats: PMIB_UDPSTATS, + ) -> ULONG; + pub fn SetIpStatisticsEx( + Statistics: PMIB_IPSTATS, + Family: ULONG, + ) -> ULONG; + pub fn GetIpStatisticsEx( + Statistics: PMIB_IPSTATS, + Family: ULONG, + ) -> ULONG; + pub fn GetIcmpStatisticsEx( + Statistics: PMIB_ICMP_EX, + Family: ULONG, + ) -> ULONG; + pub fn GetTcpStatisticsEx( + Statistics: PMIB_TCPSTATS, + Family: ULONG, + ) -> ULONG; + pub fn GetUdpStatisticsEx( + Statistics: PMIB_UDPSTATS, + Family: ULONG, + ) -> ULONG; + pub fn GetTcpStatisticsEx2( + Statistics: PMIB_TCPSTATS2, + Family: ULONG, + ) -> ULONG; + pub fn GetUdpStatisticsEx2( + Statistics: PMIB_UDPSTATS2, + Family: ULONG, + ) -> ULONG; + pub fn SetIfEntry( + pIfRow: PMIB_IFROW, + ) -> DWORD; + pub fn CreateIpForwardEntry( + pRoute: PMIB_IPFORWARDROW, + ) -> DWORD; + pub fn SetIpForwardEntry( + pRoute: PMIB_IPFORWARDROW, + ) -> DWORD; + pub fn DeleteIpForwardEntry( + pRoute: PMIB_IPFORWARDROW, + ) -> DWORD; + pub fn SetIpStatistics( 
+ pIpStats: PMIB_IPSTATS, + ) -> DWORD; + pub fn SetIpTTL( + nTTL: UINT, + ) -> DWORD; + pub fn CreateIpNetEntry( + pArpEntry: PMIB_IPNETROW, + ) -> DWORD; + pub fn SetIpNetEntry( + pArpEntry: PMIB_IPNETROW, + ) -> DWORD; + pub fn DeleteIpNetEntry( + pArpEntry: PMIB_IPNETROW, + ) -> DWORD; + pub fn FlushIpNetTable( + dwIfIndex: DWORD, + ) -> DWORD; + pub fn CreateProxyArpEntry( + dwAddress: DWORD, + dwMask: DWORD, + dwIfIndex: DWORD, + ) -> DWORD; + pub fn DeleteProxyArpEntry( + dwAddress: DWORD, + dwMask: DWORD, + dwIfIndex: DWORD, + ) -> DWORD; + pub fn SetTcpEntry( + pTcpRow: PMIB_TCPROW, + ) -> DWORD; + pub fn GetInterfaceInfo( + pIfTable: PIP_INTERFACE_INFO, + dwOutBufLen: PULONG, + ) -> DWORD; + pub fn GetUniDirectionalAdapterInfo( + pIPIfInfo: PIP_UNIDIRECTIONAL_ADAPTER_ADDRESS, + dwOutBufLen: PULONG, + ) -> DWORD; + pub fn NhpAllocateAndGetInterfaceInfoFromStack( + ppTable: *mut PIP_INTERFACE_NAME_INFO, + pdwCount: PDWORD, + bOrder: BOOL, + hHeap: HANDLE, + dwFlags: DWORD, + ) -> DWORD; + pub fn GetBestInterface( + dwDestAddr: IPAddr, + pdwBestIfIndex: PDWORD, + ) -> DWORD; + pub fn GetBestInterfaceEx( + pDestAddr: PSOCKADDR, + pdwBestIfIndex: PDWORD, + ) -> DWORD; + pub fn GetBestRoute( + dwDestAddr: DWORD, + dwSourceAddr: DWORD, + pBestRoute: PMIB_IPFORWARDROW, + ) -> DWORD; + pub fn NotifyAddrChange( + Handle: PHANDLE, + overlapped: LPOVERLAPPED, + ) -> DWORD; + pub fn NotifyRouteChange( + Handle: PHANDLE, + overlapped: LPOVERLAPPED, + ) -> DWORD; + pub fn CancelIPChangeNotify( + notifyOverlapped: LPOVERLAPPED + ) -> BOOL; + pub fn GetAdapterIndex( + AdapterName: LPWSTR, + IfIndex: PULONG, + ) -> DWORD; + pub fn AddIPAddress( + Address: IPAddr, + IpMask: IPMask, + IfIndex: DWORD, + NTEContext: PULONG, + NTEInstance: PULONG, + ) -> DWORD; + pub fn DeleteIPAddress( + NTEContext: ULONG, + ) -> DWORD; + pub fn GetNetworkParams( + pFixedInfo: PFIXED_INFO, + pOutBufLen: PULONG, + ) -> DWORD; + pub fn GetAdaptersInfo( + AdapterInfo: PIP_ADAPTER_INFO, + SizePointer: PULONG, + ) -> ULONG; + pub fn GetAdapterOrderMap() -> PIP_ADAPTER_ORDER_MAP; + pub fn GetAdaptersAddresses( + Family: ULONG, + Flags: ULONG, + Reserved: PVOID, + AdapterAddresses: PIP_ADAPTER_ADDRESSES, + SizePointer: PULONG, + ) -> ULONG; + pub fn GetPerAdapterInfo( + IfIndex: ULONG, + pPerAdapterInfo: PIP_PER_ADAPTER_INFO, + pOutBufLen: PULONG, + ) -> DWORD; +} +STRUCT!{struct INTERFACE_TIMESTAMP_CAPABILITY_FLAGS { + PtpV2OverUdpIPv4EventMsgReceiveHw: BOOLEAN, + PtpV2OverUdpIPv4AllMsgReceiveHw: BOOLEAN, + PtpV2OverUdpIPv4EventMsgTransmitHw: BOOLEAN, + PtpV2OverUdpIPv4AllMsgTransmitHw: BOOLEAN, + PtpV2OverUdpIPv6EventMsgReceiveHw: BOOLEAN, + PtpV2OverUdpIPv6AllMsgReceiveHw: BOOLEAN, + PtpV2OverUdpIPv6EventMsgTransmitHw: BOOLEAN, + PtpV2OverUdpIPv6AllMsgTransmitHw: BOOLEAN, + AllReceiveHw: BOOLEAN, + AllTransmitHw: BOOLEAN, + TaggedTransmitHw: BOOLEAN, + AllReceiveSw: BOOLEAN, + AllTransmitSw: BOOLEAN, + TaggedTransmitSw: BOOLEAN, +}} +pub type PINTERFACE_TIMESTAMP_CAPABILITY_FLAGS = *mut INTERFACE_TIMESTAMP_CAPABILITY_FLAGS; +STRUCT!{struct INTERFACE_TIMESTAMP_CAPABILITIES { + Version: ULONG, + HardwareClockFrequencyHz: ULONG64, + CrossTimestamp: BOOLEAN, + Reserved1: ULONG64, + Reserved2: ULONG64, + TimestampFlags: INTERFACE_TIMESTAMP_CAPABILITY_FLAGS, +}} +pub type PINTERFACE_TIMESTAMP_CAPABILITIES = *mut INTERFACE_TIMESTAMP_CAPABILITIES; +STRUCT!{struct INTERFACE_HARDWARE_CROSSTIMESTAMP { + Version: ULONG, + Flags: ULONG, + SystemTimestamp1: ULONG64, + HardwareClockTimestamp: ULONG64, + SystemTimestamp2: ULONG64, +}} 
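// --- Illustrative usage sketch (editor's note, not part of the vendored patch) ---
// The GetAdaptersAddresses binding declared in this iphlpapi.rs hunk is normally
// driven with the classic "ask for the required size, then fill" loop sketched
// below. The helper name `list_adapter_names`, the 16 KiB starting buffer, and
// the choice of flags are illustrative assumptions; the imports come from the
// modules touched by this patch (um::iphlpapi, um::iptypes) plus winapi's
// existing shared modules. A production version would also guarantee 8-byte
// alignment of the buffer instead of relying on a plain Vec<u8>.
use std::ptr;
use winapi::shared::winerror::{ERROR_BUFFER_OVERFLOW, ERROR_SUCCESS};
use winapi::shared::ws2def::AF_UNSPEC;
use winapi::um::iphlpapi::GetAdaptersAddresses;
use winapi::um::iptypes::{GAA_FLAG_INCLUDE_PREFIX, IP_ADAPTER_ADDRESSES, PIP_ADAPTER_ADDRESSES};

unsafe fn list_adapter_names() -> Result<Vec<String>, u32> {
    let mut size: u32 = 16 * 1024; // start with a 16 KiB buffer and grow on demand
    loop {
        let mut buf = vec![0u8; size as usize];
        let ret = GetAdaptersAddresses(
            AF_UNSPEC as u32,           // both IPv4 and IPv6 adapters
            GAA_FLAG_INCLUDE_PREFIX,
            ptr::null_mut(),
            buf.as_mut_ptr() as PIP_ADAPTER_ADDRESSES,
            &mut size,
        );
        match ret {
            ERROR_SUCCESS => {
                // Walk the singly linked list of IP_ADAPTER_ADDRESSES records
                // that the call wrote into the buffer.
                let mut names = Vec::new();
                let mut cur = buf.as_mut_ptr() as *mut IP_ADAPTER_ADDRESSES;
                while !cur.is_null() {
                    let name = std::ffi::CStr::from_ptr((*cur).AdapterName)
                        .to_string_lossy()
                        .into_owned();
                    names.push(name);
                    cur = (*cur).Next;
                }
                return Ok(names);
            }
            ERROR_BUFFER_OVERFLOW => continue, // `size` now holds the required length
            err => return Err(err),
        }
    }
}
// --- end of editor's sketch ---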
+pub type PINTERFACE_HARDWARE_CROSSTIMESTAMP = *mut INTERFACE_HARDWARE_CROSSTIMESTAMP; +DECLARE_HANDLE!{HIFTIMESTAMPCHANGE, HIFTIMESTAMPCHANGE__} +extern "system" { + pub fn GetInterfaceCurrentTimestampCapabilities( + InterfaceLuid: *const NET_LUID, + TimestampCapabilite: PINTERFACE_TIMESTAMP_CAPABILITIES, + ) -> DWORD; + pub fn GetInterfaceHardwareTimestampCapabilities( + InterfaceLuid: *const NET_LUID, + TimestampCapabilite: PINTERFACE_TIMESTAMP_CAPABILITIES, + ) -> DWORD; + pub fn CaptureInterfaceHardwareCrossTimestamp( + InterfaceLuid: *const NET_LUID, + CrossTimestamp: PINTERFACE_HARDWARE_CROSSTIMESTAMP, + ) -> DWORD; +} +FN!{stdcall INTERFACE_TIMESTAMP_CONFIG_CHANGE_CALLBACK( + CallerContext: PVOID, +) -> ()} +pub type PINTERFACE_TIMESTAMP_CONFIG_CHANGE_CALLBACK = *mut + INTERFACE_TIMESTAMP_CONFIG_CHANGE_CALLBACK; +extern "system" { + pub fn NotifyIfTimestampConfigChange( + CallerContext: PVOID, + Callback: PINTERFACE_TIMESTAMP_CONFIG_CHANGE_CALLBACK, + NotificationHandle: *mut HIFTIMESTAMPCHANGE, + ) -> DWORD; + pub fn CancelIfTimestampConfigChange( + NotificationHandle: HIFTIMESTAMPCHANGE, + ); + pub fn IpReleaseAddress( + AdapterInfo: PIP_ADAPTER_INDEX_MAP, + ) -> DWORD; + pub fn IpRenewAddress( + AdapterInfo: PIP_ADAPTER_INDEX_MAP, + ) -> DWORD; + pub fn SendARP( + DestIP: IPAddr, + SrcIP: IPAddr, + pMacAddr: PVOID, + PhyAddrLen: PULONG, + ) -> DWORD; + pub fn GetRTTAndHopCount( + DestIpAddress: IPAddr, + HopCount: PULONG, + MaxHops: ULONG, + RTT: PULONG, + ) -> BOOL; + pub fn GetFriendlyIfIndex( + IfIndex: DWORD, + ) -> DWORD; + pub fn EnableRouter( + pHandle: *mut HANDLE, + pOverlapped: *mut OVERLAPPED, + ) -> DWORD; + pub fn UnenableRouter( + pOverlapped: *mut OVERLAPPED, + lpdwEnableCount: LPDWORD, + ) -> DWORD; + pub fn DisableMediaSense( + pHandle: *mut HANDLE, + pOverLapped: *mut OVERLAPPED, + ) -> DWORD; + pub fn RestoreMediaSense( + pOverlapped: *mut OVERLAPPED, + lpdwEnableCount: LPDWORD, + ) -> DWORD; + pub fn GetIpErrorString( + ErrorCode: IP_STATUS, + Buffer: PWSTR, + Size: PDWORD, + ) -> DWORD; + pub fn ResolveNeighbor( + NetworkAddress: *mut SOCKADDR, + PhysicalAddress: PVOID, + PhysicalAddressLength: PULONG, + ) -> ULONG; + pub fn CreatePersistentTcpPortReservation( + StartPort: USHORT, + NumberOfPorts: USHORT, + Token: PULONG64, + ) -> ULONG; + pub fn CreatePersistentUdpPortReservation( + StartPort: USHORT, + NumberOfPorts: USHORT, + Token: PULONG64, + ) -> ULONG; + pub fn DeletePersistentTcpPortReservation( + StartPort: USHORT, + NumberOfPorts: USHORT, + ) -> ULONG; + pub fn DeletePersistentUdpPortReservation( + StartPort: USHORT, + NumberOfPorts: USHORT, + ) -> ULONG; + pub fn LookupPersistentTcpPortReservation( + StartPort: USHORT, + NumberOfPorts: USHORT, + Token: PULONG64, + ) -> ULONG; + pub fn LookupPersistentUdpPortReservation( + StartPort: USHORT, + NumberOfPorts: USHORT, + Token: PULONG64, + ) -> ULONG; +} +ENUM!{enum NET_ADDRESS_FORMAT { + NET_ADDRESS_FORMAT_UNSPECIFIED = 0, + NET_ADDRESS_DNS_NAME = 1, + NET_ADDRESS_IPV4 = 2, + NET_ADDRESS_IPV6 = 3, +}} +pub const DNS_MAX_NAME_BUFFER_LENGTH: usize = 256; +STRUCT!{struct NET_ADDRESS_INFO_u_s { + Address: [WCHAR; DNS_MAX_NAME_BUFFER_LENGTH], + Port: [WCHAR; 6], +}} +UNION!{union NET_ADDRESS_INFO_u { + [u32; 131], + NamedAddress NamedAddress_mut: NET_ADDRESS_INFO_u_s, + Ipv4Address Ipv4Address_mut: SOCKADDR_IN, + Ipv6Address Ipv6Address_mut: SOCKADDR_IN6, + IpAddress IpAddress_mut: SOCKADDR, +}} +STRUCT!{struct NET_ADDRESS_INFO { + Format: NET_ADDRESS_FORMAT, + u: NET_ADDRESS_INFO_u, +}} +pub type 
PNET_ADDRESS_INFO = *mut NET_ADDRESS_INFO; +extern "system" { + // #if defined (_WS2DEF_) && defined (_WS2IPDEF_) && defined(_WINDNS_INCLUDED_) + pub fn ParseNetworkString( + NetworkString: *const *mut WCHAR, + Types: DWORD, + AddressInfo: PNET_ADDRESS_INFO, + PortNumber: *mut USHORT, + PrefixLength: *mut BYTE, + ) -> DWORD; +} diff -Nru cargo-0.44.1/vendor/winapi/src/um/iptypes.rs cargo-0.47.0/vendor/winapi/src/um/iptypes.rs --- cargo-0.44.1/vendor/winapi/src/um/iptypes.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/iptypes.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,372 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::basetsd::{UINT8, ULONG64}; +use shared::guiddef::GUID; +use shared::ifdef::{ + IF_INDEX, IF_LUID, IF_OPER_STATUS, NET_IF_COMPARTMENT_ID, NET_IF_CONNECTION_TYPE, + NET_IF_NETWORK_GUID, TUNNEL_TYPE +}; +use shared::ipifcons::IFTYPE; +use shared::minwindef::{BOOL, BYTE, DWORD, UCHAR, UINT}; +use shared::nldef::{NL_DAD_STATE, NL_PREFIX_ORIGIN, NL_SUFFIX_ORIGIN}; +use shared::ntdef::{CHAR, PCHAR, PWCHAR, ULONG, ULONGLONG, WCHAR}; +use shared::ws2def::SOCKET_ADDRESS; +use ucrt::corecrt::time_t; +pub const MAX_ADAPTER_DESCRIPTION_LENGTH: usize = 128; +pub const MAX_ADAPTER_NAME_LENGTH: usize = 256; +pub const MAX_ADAPTER_ADDRESS_LENGTH: usize = 8; +pub const DEFAULT_MINIMUM_ENTITIES: usize = 32; +pub const MAX_HOSTNAME_LEN: usize = 128; +pub const MAX_DOMAIN_NAME_LEN: usize = 128; +pub const MAX_SCOPE_ID_LEN: usize = 256; +pub const MAX_DHCPV6_DUID_LENGTH: usize = 130; +pub const MAX_DNS_SUFFIX_STRING_LENGTH: usize = 256; +pub const BROADCAST_NODETYPE: usize = 1; +pub const PEER_TO_PEER_NODETYPE: usize = 2; +pub const MIXED_NODETYPE: usize = 4; +pub const HYBRID_NODETYPE: usize = 8; +STRUCT!{struct IP_ADDRESS_STRING { + String: [CHAR; 4*4], +}} +pub type PIP_ADDRESS_STRING = *mut IP_ADDRESS_STRING; +pub type IP_MASK_STRING = IP_ADDRESS_STRING; +pub type PIP_MASK_STRING = *mut IP_MASK_STRING; +STRUCT!{struct IP_ADDR_STRING { + Next: *mut IP_ADDR_STRING, + IpAddress: IP_ADDRESS_STRING, + IpMask: IP_MASK_STRING, + Context: DWORD, +}} +pub type PIP_ADDR_STRING = *mut IP_ADDR_STRING; +STRUCT!{struct IP_ADAPTER_INFO { + Next: *mut IP_ADAPTER_INFO, + ComboIndex: DWORD, + AdapterName: [CHAR; MAX_ADAPTER_NAME_LENGTH + 4], + Description: [CHAR; MAX_ADAPTER_DESCRIPTION_LENGTH + 4], + AddressLength: UINT, + Address: [BYTE; MAX_ADAPTER_ADDRESS_LENGTH], + Index: DWORD, + Type: UINT, + DhcpEnabled: UINT, + CurrentIpAddress: PIP_ADDR_STRING, + IpAddressList: IP_ADDR_STRING, + GatewayList: IP_ADDR_STRING, + DhcpServer: IP_ADDR_STRING, + HaveWins: BOOL, + PrimaryWinsServer: IP_ADDR_STRING, + SecondaryWinsServer: IP_ADDR_STRING, + LeaseObtained: time_t, + LeaseExpires: time_t, +}} +pub type PIP_ADAPTER_INFO = *mut IP_ADAPTER_INFO; +pub type IP_PREFIX_ORIGIN = NL_PREFIX_ORIGIN; +pub type IP_SUFFIX_ORIGIN = NL_SUFFIX_ORIGIN; +pub type IP_DAD_STATE = NL_DAD_STATE; +STRUCT!{struct IP_ADAPTER_UNICAST_ADDRESS_LH_u_s { + Length: ULONG, + Flags: DWORD, +}} +UNION!{union IP_ADAPTER_UNICAST_ADDRESS_LH_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_UNICAST_ADDRESS_LH_u_s, +}} +STRUCT!{struct IP_ADAPTER_UNICAST_ADDRESS_LH { + u: IP_ADAPTER_UNICAST_ADDRESS_LH_u, + Next: *mut IP_ADAPTER_UNICAST_ADDRESS_LH, + Address: SOCKET_ADDRESS, + PrefixOrigin: 
IP_PREFIX_ORIGIN, + SuffixOrigin: IP_SUFFIX_ORIGIN, + DadState: IP_DAD_STATE, + ValidLifetime: ULONG, + PreferredLifetime: ULONG, + LeaseLifetime: ULONG, + OnLinkPrefixLength: UINT8, +}} +pub type PIP_ADAPTER_UNICAST_ADDRESS_LH = *mut IP_ADAPTER_UNICAST_ADDRESS_LH; +STRUCT!{struct IP_ADAPTER_UNICAST_ADDRESS_XP_u_s { + Length: ULONG, + Flags: DWORD, +}} +UNION!{union IP_ADAPTER_UNICAST_ADDRESS_XP_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_UNICAST_ADDRESS_XP_u_s, +}} +STRUCT!{struct IP_ADAPTER_UNICAST_ADDRESS_XP { + u: IP_ADAPTER_UNICAST_ADDRESS_XP_u, + Next: *mut IP_ADAPTER_UNICAST_ADDRESS_XP, + Address: SOCKET_ADDRESS, + PrefixOrigin: IP_PREFIX_ORIGIN, + SuffixOrigin: IP_SUFFIX_ORIGIN, + DadState: IP_DAD_STATE, + ValidLifetime: ULONG, + PreferredLifetime: ULONG, + LeaseLifetime: ULONG, +}} +pub type PIP_ADAPTER_UNICAST_ADDRESS_XP = *mut IP_ADAPTER_UNICAST_ADDRESS_XP; +pub type IP_ADAPTER_UNICAST_ADDRESS = IP_ADAPTER_UNICAST_ADDRESS_LH; +// pub type IP_ADAPTER_UNICAST_ADDRESS = IP_ADAPTER_UNICAST_ADDRESS_XP; +pub type PIP_ADAPTER_UNICAST_ADDRESS = *mut IP_ADAPTER_UNICAST_ADDRESS; +pub const IP_ADAPTER_ADDRESS_DNS_ELIGIBLE: usize = 0x01; +pub const IP_ADAPTER_ADDRESS_TRANSIENT: usize = 0x02; +STRUCT!{struct IP_ADAPTER_ANYCAST_ADDRESS_XP_u_s { + Length: ULONG, + Flags: DWORD, +}} +UNION!{union IP_ADAPTER_ANYCAST_ADDRESS_XP_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_ANYCAST_ADDRESS_XP_u_s, +}} +STRUCT!{struct IP_ADAPTER_ANYCAST_ADDRESS_XP { + u: IP_ADAPTER_ANYCAST_ADDRESS_XP_u, + Next: *mut IP_ADAPTER_ANYCAST_ADDRESS_XP, + Address: SOCKET_ADDRESS, +}} +pub type PIP_ADAPTER_ANYCAST_ADDRESS_XP = *mut IP_ADAPTER_ANYCAST_ADDRESS_XP; +pub type IP_ADAPTER_ANYCAST_ADDRESS = IP_ADAPTER_ANYCAST_ADDRESS_XP; +pub type PIP_ADAPTER_ANYCAST_ADDRESS = *mut IP_ADAPTER_ANYCAST_ADDRESS; +STRUCT!{struct IP_ADAPTER_MULTICAST_ADDRESS_XP_u_s { + Length: ULONG, + Flags: DWORD, +}} +UNION!{union IP_ADAPTER_MULTICAST_ADDRESS_XP_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_MULTICAST_ADDRESS_XP_u_s, +}} +STRUCT!{struct IP_ADAPTER_MULTICAST_ADDRESS_XP { + u: IP_ADAPTER_MULTICAST_ADDRESS_XP_u, + Next: *mut IP_ADAPTER_MULTICAST_ADDRESS_XP, + Address: SOCKET_ADDRESS, +}} +pub type PIP_ADAPTER_MULTICAST_ADDRESS_XP = *mut IP_ADAPTER_MULTICAST_ADDRESS_XP; +pub type IP_ADAPTER_MULTICAST_ADDRESS = IP_ADAPTER_MULTICAST_ADDRESS_XP; +pub type PIP_ADAPTER_MULTICAST_ADDRESS = *mut IP_ADAPTER_MULTICAST_ADDRESS_XP; +STRUCT!{struct IP_ADAPTER_DNS_SERVER_ADDRESS_XP_u_s { + Length: ULONG, + Reserved: DWORD, +}} +UNION!{union IP_ADAPTER_DNS_SERVER_ADDRESS_XP_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_DNS_SERVER_ADDRESS_XP_u_s, +}} +STRUCT!{struct IP_ADAPTER_DNS_SERVER_ADDRESS_XP { + u: IP_ADAPTER_DNS_SERVER_ADDRESS_XP_u, + Next: *mut IP_ADAPTER_DNS_SERVER_ADDRESS_XP, + Address: SOCKET_ADDRESS, +}} +pub type PIP_ADAPTER_DNS_SERVER_ADDRESS_XP = *mut IP_ADAPTER_DNS_SERVER_ADDRESS_XP; +pub type IP_ADAPTER_DNS_SERVER_ADDRESS = IP_ADAPTER_DNS_SERVER_ADDRESS_XP; +pub type PIP_ADAPTER_DNS_SERVER_ADDRESS = *mut IP_ADAPTER_DNS_SERVER_ADDRESS_XP; +STRUCT!{struct IP_ADAPTER_WINS_SERVER_ADDRESS_LH_u_s { + Length: ULONG, + Reserved: DWORD, +}} +UNION!{union IP_ADAPTER_WINS_SERVER_ADDRESS_LH_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_WINS_SERVER_ADDRESS_LH_u_s, +}} +STRUCT!{struct IP_ADAPTER_WINS_SERVER_ADDRESS_LH { + u: IP_ADAPTER_WINS_SERVER_ADDRESS_LH_u, + Next: *mut 
IP_ADAPTER_WINS_SERVER_ADDRESS_LH, + Address: SOCKET_ADDRESS, +}} +pub type PIP_ADAPTER_WINS_SERVER_ADDRESS_LH = *mut IP_ADAPTER_WINS_SERVER_ADDRESS_LH; +pub type IP_ADAPTER_WINS_SERVER_ADDRESS = IP_ADAPTER_WINS_SERVER_ADDRESS_LH; +pub type PIP_ADAPTER_WINS_SERVER_ADDRESS = *mut IP_ADAPTER_WINS_SERVER_ADDRESS_LH; +STRUCT!{struct IP_ADAPTER_GATEWAY_ADDRESS_LH_u_s { + Length: ULONG, + Reserved: DWORD, +}} +UNION!{union IP_ADAPTER_GATEWAY_ADDRESS_LH_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_GATEWAY_ADDRESS_LH_u_s, +}} +STRUCT!{struct IP_ADAPTER_GATEWAY_ADDRESS_LH { + u: IP_ADAPTER_GATEWAY_ADDRESS_LH_u, + Next: *mut IP_ADAPTER_GATEWAY_ADDRESS_LH, + Address: SOCKET_ADDRESS, +}} +pub type PIP_ADAPTER_GATEWAY_ADDRESS_LH = *mut IP_ADAPTER_GATEWAY_ADDRESS_LH; +pub type IP_ADAPTER_GATEWAY_ADDRESS = IP_ADAPTER_GATEWAY_ADDRESS_LH; +pub type PIP_ADAPTER_GATEWAY_ADDRESS = *mut IP_ADAPTER_GATEWAY_ADDRESS_LH; +STRUCT!{struct IP_ADAPTER_PREFIX_XP_u_s { + Length: ULONG, + Flags: DWORD, +}} +UNION!{union IP_ADAPTER_PREFIX_XP_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_PREFIX_XP_u_s, +}} +STRUCT!{struct IP_ADAPTER_PREFIX_XP { + u: IP_ADAPTER_PREFIX_XP_u, + Next: *mut IP_ADAPTER_PREFIX_XP, + Address: SOCKET_ADDRESS, + PrefixLength: ULONG, +}} +pub type PIP_ADAPTER_PREFIX_XP = *mut IP_ADAPTER_PREFIX_XP; +pub type IP_ADAPTER_PREFIX = IP_ADAPTER_PREFIX_XP; +pub type PIP_ADAPTER_PREFIX = *mut IP_ADAPTER_PREFIX_XP; +STRUCT!{struct IP_ADAPTER_DNS_SUFFIX { + Next: *mut IP_ADAPTER_DNS_SUFFIX, + String: [WCHAR; MAX_DNS_SUFFIX_STRING_LENGTH], +}} +pub type PIP_ADAPTER_DNS_SUFFIX = *mut IP_ADAPTER_DNS_SUFFIX; +pub const IP_ADAPTER_DDNS_ENABLED: DWORD = 0x00000001; +pub const IP_ADAPTER_REGISTER_ADAPTER_SUFFIX: DWORD = 0x00000002; +pub const IP_ADAPTER_DHCP_ENABLED: DWORD = 0x00000004; +pub const IP_ADAPTER_RECEIVE_ONLY: DWORD = 0x00000008; +pub const IP_ADAPTER_NO_MULTICAST: DWORD = 0x00000010; +pub const IP_ADAPTER_IPV6_OTHER_STATEFUL_CONFIG: DWORD = 0x00000020; +pub const IP_ADAPTER_NETBIOS_OVER_TCPIP_ENABLED: DWORD = 0x00000040; +pub const IP_ADAPTER_IPV4_ENABLED: DWORD = 0x00000080; +pub const IP_ADAPTER_IPV6_ENABLED: DWORD = 0x00000100; +pub const IP_ADAPTER_IPV6_MANAGE_ADDRESS_CONFIG: DWORD = 0x00000200; +STRUCT!{struct IP_ADAPTER_ADDRESSES_LH_u_s { + Length: ULONG, + IfIndex: IF_INDEX, +}} +UNION!{union IP_ADAPTER_ADDRESSES_LH_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_ADDRESSES_LH_u_s, +}} +STRUCT!{struct IP_ADAPTER_ADDRESSES_LH { + u: IP_ADAPTER_ADDRESSES_LH_u, + Next: *mut IP_ADAPTER_ADDRESSES_LH, + AdapterName: PCHAR, + FirstUnicastAddress: PIP_ADAPTER_UNICAST_ADDRESS_LH, + FirstAnycastAddress: PIP_ADAPTER_ANYCAST_ADDRESS_XP, + FirstMulticastAddress: PIP_ADAPTER_MULTICAST_ADDRESS_XP, + FirstDnsServerAddress: PIP_ADAPTER_DNS_SERVER_ADDRESS_XP, + DnsSuffix: PWCHAR, + Description: PWCHAR, + FriendlyName: PWCHAR, + PhysicalAddress: [BYTE; MAX_ADAPTER_ADDRESS_LENGTH], + PhysicalAddressLength: ULONG, + Flags: ULONG, + Mtu: ULONG, + IfType: IFTYPE, + OperStatus: IF_OPER_STATUS, + Ipv6IfIndex: IF_INDEX, + ZoneIndices: [ULONG; 16], + FirstPrefix: PIP_ADAPTER_PREFIX_XP, + TransmitLinkSpeed: ULONG64, + ReceiveLinkSpeed: ULONG64, + FirstWinsServerAddress: PIP_ADAPTER_WINS_SERVER_ADDRESS_LH, + FirstGatewayAddress: PIP_ADAPTER_GATEWAY_ADDRESS_LH, + Ipv4Metric: ULONG, + Ipv6Metric: ULONG, + Luid: IF_LUID, + Dhcpv4Server: SOCKET_ADDRESS, + CompartmentId: NET_IF_COMPARTMENT_ID, + NetworkGuid: NET_IF_NETWORK_GUID, + 
ConnectionType: NET_IF_CONNECTION_TYPE, + TunnelType: TUNNEL_TYPE, + Dhcpv6Server: SOCKET_ADDRESS, + Dhcpv6ClientDuid: [BYTE; MAX_DHCPV6_DUID_LENGTH], + Dhcpv6ClientDuidLength: ULONG, + Dhcpv6Iaid: ULONG, + FirstDnsSuffix: PIP_ADAPTER_DNS_SUFFIX, +}} +BITFIELD!{IP_ADAPTER_ADDRESSES_LH Flags: ULONG [ + DdnsEnabled set_DdnsEnabled[0..1], + RegisterAdapterSuffix set_RegisterAdapterSuffix[1..2], + Dhcpv4Enabled set_Dhcpv4Enabled[2..3], + ReceiveOnly set_ReceiveOnly[3..4], + NoMulticast set_NoMulticast[4..5], + Ipv6OtherStatefulConfig set_Ipv6OtherStatefulConfig[5..6], + NetbiosOverTcpipEnabled set_NetbiosOverTcpipEnabled[6..7], + Ipv4Enabled set_Ipv4Enabled[7..8], + Ipv6Enabled set_Ipv6Enabled[8..9], + Ipv6ManagedAddressConfigurationSupported set_Ipv6ManagedAddressConfigurationSupported[9..10], +]} +pub type PIP_ADAPTER_ADDRESSES_LH = *mut IP_ADAPTER_ADDRESSES_LH; +STRUCT!{struct IP_ADAPTER_ADDRESSES_XP_u_s { + Length: ULONG, + IfIndex: DWORD, +}} +UNION!{union IP_ADAPTER_ADDRESSES_XP_u { + [u64; 1], + Alignment Alignment_mut: ULONGLONG, + s s_mut: IP_ADAPTER_ADDRESSES_XP_u_s, +}} +STRUCT!{struct IP_ADAPTER_ADDRESSES_XP { + u: IP_ADAPTER_ADDRESSES_XP_u, + Next: *mut IP_ADAPTER_ADDRESSES_XP, + AdapterName: PCHAR, + FirstUnicastAddress: PIP_ADAPTER_UNICAST_ADDRESS_XP, + FirstAnycastAddress: PIP_ADAPTER_ANYCAST_ADDRESS_XP, + FirstMulticastAddress: PIP_ADAPTER_MULTICAST_ADDRESS_XP, + FirstDnsServerAddress: PIP_ADAPTER_DNS_SERVER_ADDRESS_XP, + DnsSuffix: PWCHAR, + Description: PWCHAR, + FriendlyName: PWCHAR, + PhysicalAddress: [BYTE; MAX_ADAPTER_ADDRESS_LENGTH], + PhysicalAddressLength: DWORD, + Flags: DWORD, + Mtu: DWORD, + IfType: DWORD, + OperStatus: IF_OPER_STATUS, + Ipv6IfIndex: DWORD, + ZoneIndices: [DWORD; 16], + FirstPrefix: PIP_ADAPTER_PREFIX_XP, +}} +pub type PIP_ADAPTER_ADDRESSES_XP = *mut IP_ADAPTER_ADDRESSES_XP; +pub type IP_ADAPTER_ADDRESSES = IP_ADAPTER_ADDRESSES_LH; +// pub type IP_ADAPTER_ADDRESSES = IP_ADAPTER_ADDRESSES_XP; +pub type PIP_ADAPTER_ADDRESSES = *mut IP_ADAPTER_ADDRESSES; +pub const GAA_FLAG_SKIP_UNICAST: ULONG = 0x0001; +pub const GAA_FLAG_SKIP_ANYCAST: ULONG = 0x0002; +pub const GAA_FLAG_SKIP_MULTICAST: ULONG = 0x0004; +pub const GAA_FLAG_SKIP_DNS_SERVER: ULONG = 0x0008; +pub const GAA_FLAG_INCLUDE_PREFIX: ULONG = 0x0010; +pub const GAA_FLAG_SKIP_FRIENDLY_NAME: ULONG = 0x0020; +pub const GAA_FLAG_INCLUDE_WINS_INFO: ULONG = 0x0040; +pub const GAA_FLAG_INCLUDE_GATEWAYS: ULONG = 0x0080; +pub const GAA_FLAG_INCLUDE_ALL_INTERFACES: ULONG = 0x0100; +pub const GAA_FLAG_INCLUDE_ALL_COMPARTMENTS: ULONG = 0x0200; +pub const GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER: ULONG = 0x0400; +STRUCT!{struct IP_PER_ADAPTER_INFO_W2KSP1 { + AutoconfigEnabled: UINT, + AutoconfigActive: UINT, + CurrentDnsServer: PIP_ADDR_STRING, + DnsServerList: IP_ADDR_STRING, +}} +pub type PIP_PER_ADAPTER_INFO_W2KSP1 = *mut IP_PER_ADAPTER_INFO_W2KSP1; +pub type IP_PER_ADAPTER_INFO = IP_PER_ADAPTER_INFO_W2KSP1; +pub type PIP_PER_ADAPTER_INFO = *mut IP_PER_ADAPTER_INFO; +STRUCT!{struct FIXED_INFO_W2KSP1 { + HostName: [CHAR; MAX_HOSTNAME_LEN + 4], + DomainName: [CHAR; MAX_DOMAIN_NAME_LEN + 4], + CurrentDnsServer: PIP_ADDR_STRING, + DnsServerList: IP_ADDR_STRING, + NodeType: UINT, + ScopeId: [CHAR; MAX_SCOPE_ID_LEN + 4], + EnableRouting: UINT, + EnableProxy: UINT, + EnableDns: UINT, +}} +pub type PFIXED_INFO_W2KSP1 = *mut FIXED_INFO_W2KSP1; +pub type FIXED_INFO = FIXED_INFO_W2KSP1; +pub type PFIXED_INFO = *mut FIXED_INFO; +STRUCT!{struct IP_INTERFACE_NAME_INFO_W2KSP1 { + Index: ULONG, + MediaType: ULONG, + 
ConnectionType: UCHAR, + AccessType: UCHAR, + DeviceGuid: GUID, + InterfaceGuid: GUID, +}} +pub type PIP_INTERFACE_NAME_INFO_W2KSP1 = *mut IP_INTERFACE_NAME_INFO_W2KSP1; +pub type IP_INTERFACE_NAME_INFO = IP_INTERFACE_NAME_INFO_W2KSP1; +pub type PIP_INTERFACE_NAME_INFO = *mut IP_INTERFACE_NAME_INFO; diff -Nru cargo-0.44.1/vendor/winapi/src/um/l2cmn.rs cargo-0.47.0/vendor/winapi/src/um/l2cmn.rs --- cargo-0.44.1/vendor/winapi/src/um/l2cmn.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/l2cmn.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,55 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Definitions and data structures for common layer 2. +use shared::guiddef::GUID; +use shared::minwindef::DWORD; +use um::winnt::PVOID; +pub const L2_PROFILE_MAX_NAME_LENGTH: usize = 256; +pub const L2_NOTIFICATION_SOURCE_NONE: DWORD = 0; +pub const L2_NOTIFICATION_SOURCE_DOT3_AUTO_CONFIG: DWORD = 0x00000001; +pub const L2_NOTIFICATION_SOURCE_SECURITY: DWORD = 0x00000002; +pub const L2_NOTIFICATION_SOURCE_ONEX: DWORD = 0x00000004; +pub const L2_NOTIFICATION_SOURCE_WLAN_ACM: DWORD = 0x00000008; +pub const L2_NOTIFICATION_SOURCE_WLAN_MSM: DWORD = 0x00000010; +pub const L2_NOTIFICATION_SOURCE_WLAN_SECURITY: DWORD = 0x00000020; +pub const L2_NOTIFICATION_SOURCE_WLAN_IHV: DWORD = 0x00000040; +pub const L2_NOTIFICATION_SOURCE_WLAN_HNWK: DWORD = 0x00000080; +pub const L2_NOTIFICATION_SOURCE_WCM: DWORD = 0x00000100; +pub const L2_NOTIFICATION_SOURCE_WCM_CSP: DWORD = 0x00000200; +pub const L2_NOTIFICATION_SOURCE_WFD: DWORD = 0x00000400; +pub const L2_NOTIFICATION_SOURCE_ALL: DWORD = 0x0000ffff; +pub const L2_NOTIFICATION_CODE_PUBLIC_BEGIN: DWORD = 0x00000000; +pub const L2_NOTIFICATION_CODE_GROUP_SIZE: DWORD = 0x00001000; +pub const L2_NOTIFICATION_CODE_V2_BEGIN: DWORD = L2_NOTIFICATION_CODE_PUBLIC_BEGIN + + L2_NOTIFICATION_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_GROUP_SIZE: u32 = 0x10000; +pub const L2_REASON_CODE_GEN_BASE: u32 = 0x10000; +pub const L2_REASON_CODE_DOT11_AC_BASE: u32 = L2_REASON_CODE_GEN_BASE + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_DOT11_MSM_BASE: u32 = L2_REASON_CODE_DOT11_AC_BASE + + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_DOT11_SECURITY_BASE: u32 = L2_REASON_CODE_DOT11_MSM_BASE + + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_ONEX_BASE: u32 = L2_REASON_CODE_DOT11_SECURITY_BASE + + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_DOT3_AC_BASE: u32 = L2_REASON_CODE_ONEX_BASE + + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_DOT3_MSM_BASE: u32 = L2_REASON_CODE_DOT3_AC_BASE + + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_PROFILE_BASE: u32 = L2_REASON_CODE_DOT3_MSM_BASE + + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_IHV_BASE: u32 = L2_REASON_CODE_PROFILE_BASE + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_WIMAX_BASE: u32 = L2_REASON_CODE_IHV_BASE + L2_REASON_CODE_GROUP_SIZE; +pub const L2_REASON_CODE_SUCCESS: u32 = 0; +pub const L2_REASON_CODE_UNKNOWN: u32 = L2_REASON_CODE_GEN_BASE + 1; +pub const L2_REASON_CODE_PROFILE_MISSING: u32 = 0x00000001; +STRUCT!{struct L2_NOTIFICATION_DATA { + NotificationSource: DWORD, + NotificationCode: DWORD, + InterfaceGuid: GUID, + dwDataSize: DWORD, + pData: PVOID, +}} +pub type PL2_NOTIFICATION_DATA = *mut L2_NOTIFICATION_DATA; diff -Nru 
cargo-0.44.1/vendor/winapi/src/um/lmaccess.rs cargo-0.47.0/vendor/winapi/src/um/lmaccess.rs --- cargo-0.44.1/vendor/winapi/src/um/lmaccess.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/lmaccess.rs 2020-10-01 21:38:28.000000000 +0000 @@ -61,6 +61,7 @@ ) -> NET_API_STATUS; pub fn NetUserGetLocalGroups( servername: LPCWSTR, + username: LPCWSTR, level: DWORD, flags: DWORD, bufptr: *mut LPBYTE, diff -Nru cargo-0.44.1/vendor/winapi/src/um/mod.rs cargo-0.47.0/vendor/winapi/src/um/mod.rs --- cargo-0.44.1/vendor/winapi/src/um/mod.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/mod.rs 2020-10-01 21:38:28.000000000 +0000 @@ -7,6 +7,7 @@ pub mod gl; #[cfg(feature = "accctrl")] pub mod accctrl; #[cfg(feature = "aclapi")] pub mod aclapi; +#[cfg(feature = "adhoc")] pub mod adhoc; #[cfg(feature = "appmgmt")] pub mod appmgmt; #[cfg(feature = "audioclient")] pub mod audioclient; #[cfg(feature = "audiosessiontypes")] pub mod audiosessiontypes; @@ -89,6 +90,7 @@ #[cfg(feature = "dmusicc")] pub mod dmusicc; #[cfg(feature = "docobj")] pub mod docobj; #[cfg(feature = "documenttarget")] pub mod documenttarget; +#[cfg(feature = "dot1x")] pub mod dot1x; #[cfg(feature = "dpa_dsa")] pub mod dpa_dsa; #[cfg(feature = "dpapi")] pub mod dpapi; #[cfg(feature = "dsgetdc")] pub mod dsgetdc; @@ -105,6 +107,7 @@ #[cfg(feature = "dxgidebug")] pub mod dxgidebug; #[cfg(feature = "dxva2api")] pub mod dxva2api; #[cfg(feature = "dxvahd")] pub mod dxvahd; +#[cfg(feature = "eaptypes")] pub mod eaptypes; #[cfg(feature = "enclaveapi")] pub mod enclaveapi; #[cfg(feature = "endpointvolume")] pub mod endpointvolume; #[cfg(feature = "errhandlingapi")] pub mod errhandlingapi; @@ -120,10 +123,14 @@ #[cfg(feature = "imm")] pub mod imm; #[cfg(feature = "interlockedapi")] pub mod interlockedapi; #[cfg(feature = "ioapiset")] pub mod ioapiset; +#[cfg(feature = "ipexport")] pub mod ipexport; +#[cfg(feature = "iphlpapi")] pub mod iphlpapi; +#[cfg(feature = "iptypes")] pub mod iptypes; #[cfg(feature = "jobapi")] pub mod jobapi; #[cfg(feature = "jobapi2")] pub mod jobapi2; #[cfg(feature = "knownfolders")] pub mod knownfolders; #[cfg(feature = "ktmw32")] pub mod ktmw32; +#[cfg(feature = "l2cmn")] pub mod l2cmn; #[cfg(feature = "libloaderapi")] pub mod libloaderapi; #[cfg(feature = "lmaccess")] pub mod lmaccess; #[cfg(feature = "lmalert")] pub mod lmalert; @@ -196,6 +203,7 @@ #[cfg(feature = "restartmanager")] pub mod restartmanager; #[cfg(feature = "restrictederrorinfo")] pub mod restrictederrorinfo; #[cfg(feature = "rmxfguid")] pub mod rmxfguid; +#[cfg(feature = "rtinfo")] pub mod rtinfo; #[cfg(feature = "sapi")] pub mod sapi; #[cfg(feature = "sapi51")] pub mod sapi51; #[cfg(feature = "sapi53")] pub mod sapi53; @@ -212,6 +220,7 @@ #[cfg(feature = "shobjidl")] pub mod shobjidl; #[cfg(feature = "shobjidl_core")] pub mod shobjidl_core; #[cfg(feature = "shtypes")] pub mod shtypes; +#[cfg(feature = "softpub")] pub mod softpub; #[cfg(feature = "spapidef")] pub mod spapidef; #[cfg(feature = "spellcheck")] pub mod spellcheck; #[cfg(feature = "sporder")] pub mod sporder; @@ -276,12 +285,18 @@ #[cfg(feature = "winsock2")] pub mod winsock2; #[cfg(feature = "winspool")] pub mod winspool; #[cfg(feature = "winsvc")] pub mod winsvc; +#[cfg(feature = "wintrust")] pub mod wintrust; #[cfg(feature = "winusb")] pub mod winusb; #[cfg(feature = "winuser")] pub mod winuser; #[cfg(feature = "winver")] pub mod winver; +#[cfg(feature = "wlanapi")] pub mod wlanapi; +#[cfg(feature = "wlanihv")] pub mod wlanihv; 
+#[cfg(feature = "wlanihvtypes")] pub mod wlanihvtypes; +#[cfg(feature = "wlclient")] pub mod wlclient; #[cfg(feature = "wow64apiset")] pub mod wow64apiset; #[cfg(feature = "wpdmtpextensions")] pub mod wpdmtpextensions; #[cfg(feature = "ws2bth")] pub mod ws2bth; #[cfg(feature = "ws2spi")] pub mod ws2spi; #[cfg(feature = "ws2tcpip")] pub mod ws2tcpip; +#[cfg(feature = "wtsapi32")] pub mod wtsapi32; #[cfg(feature = "xinput")] pub mod xinput; diff -Nru cargo-0.44.1/vendor/winapi/src/um/oaidl.rs cargo-0.47.0/vendor/winapi/src/um/oaidl.rs --- cargo-0.44.1/vendor/winapi/src/um/oaidl.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/oaidl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -869,6 +869,42 @@ pVarDesc: *mut VARDESC, ) -> (), }} +RIDL!{#[uuid(0x1cf2b120, 0x547d, 0x101b, 0x8e, 0x65, 0x08, 0x00, 0x2b, 0x2b, 0xd1, 0x19)] +interface IErrorInfo(IErrorInfoVtbl): IUnknown(IUnknownVtbl) { + fn GetGUID( + pGUID: *mut GUID, + ) -> HRESULT, + fn GetSource( + pBstrSource: *mut BSTR, + ) -> HRESULT, + fn GetDescription( + pBstrDescription: *mut BSTR, + ) -> HRESULT, + fn GetHelpFile( + pBstrHelpFile: *mut BSTR, + ) -> HRESULT, + fn GetHelpContext( + pdwHelpContext: *mut DWORD, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x22f03340, 0x547d, 0x101b, 0x8e, 0x65, 0x08, 0x00, 0x2b, 0x2b, 0xd1, 0x19)] +interface ICreateErrorInfo(ICreateErrorInfoVtbl): IUnknown(IUnknownVtbl) { + fn SetGUID( + rguid: REFGUID, + ) -> HRESULT, + fn SetSource( + szSource: LPOLESTR, + ) -> HRESULT, + fn SetDescription( + szDescription: LPOLESTR, + ) -> HRESULT, + fn SetHelpFile( + szHelpFile: LPOLESTR, + ) -> HRESULT, + fn SetHelpContext( + dwHelpContext: DWORD, + ) -> HRESULT, +}} RIDL!{#[uuid(0x3127ca40, 0x446e, 0x11ce, 0x81, 0x35, 0x00, 0xaa, 0x00, 0x4b, 0xb8, 0x51)] interface IErrorLog(IErrorLogVtbl): IUnknown(IUnknownVtbl) { fn AddError( diff -Nru cargo-0.44.1/vendor/winapi/src/um/objidl.rs cargo-0.47.0/vendor/winapi/src/um/objidl.rs --- cargo-0.44.1/vendor/winapi/src/um/objidl.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/objidl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -11,7 +11,7 @@ use shared::ntdef::LONG; use shared::windef::{HBITMAP, HENHMETAFILE}; use shared::wtypes::{CLIPFORMAT, HMETAFILEPICT}; -use shared::wtypesbase::{LPOLESTR, OLECHAR}; +use shared::wtypesbase::{LPCOLESTR, LPOLESTR, OLECHAR}; use um::objidlbase::{IEnumString, IStream, STATSTG}; use um::unknwnbase::{IUnknown, IUnknownVtbl}; use um::winnt::{HRESULT, ULARGE_INTEGER}; @@ -316,6 +316,24 @@ grfStatFlag: DWORD, ) -> HRESULT, }} +RIDL!{#[uuid(0x0000010b, 0x0000, 0x0000, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46)] +interface IPersistFile(IPersistFileVtbl): IPersist(IPersistVtbl) { + fn IsDirty() -> HRESULT, + fn Load( + pszFileName: LPCOLESTR, + dwMode: DWORD, + ) -> HRESULT, + fn Save( + pszFileName: LPCOLESTR, + fRemember: BOOL, + ) -> HRESULT, + fn SaveCompleted( + pszFileName: LPCOLESTR, + ) -> HRESULT, + fn GetCurFile( + ppszFileName: *mut LPOLESTR, + ) -> HRESULT, +}} STRUCT!{struct DVTARGETDEVICE { tdSize: DWORD, tdDriverNameOffset: WORD, diff -Nru cargo-0.44.1/vendor/winapi/src/um/oleauto.rs cargo-0.47.0/vendor/winapi/src/um/oleauto.rs --- cargo-0.44.1/vendor/winapi/src/um/oleauto.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/oleauto.rs 2020-10-01 21:38:28.000000000 +0000 @@ -10,7 +10,9 @@ use shared::wtypes::{BSTR, DATE, DECIMAL, LPBSTR, LPDECIMAL, VARTYPE}; use shared::wtypesbase::{DOUBLE, LPCOLESTR, LPOLESTR, OLECHAR}; use um::minwinbase::LPSYSTEMTIME; -use 
um::oaidl::{DISPID_UNKNOWN, ITypeLib, SAFEARRAY, VARIANT, VARIANTARG}; +use um::oaidl::{ + DISPID_UNKNOWN, ICreateErrorInfo, IErrorInfo, ITypeLib, SAFEARRAY, VARIANT, VARIANTARG +}; use um::winnt::{CHAR, HRESULT, INT, LCID, LONG, LPCSTR, SHORT}; extern "system" { pub fn SysAllocString( @@ -831,6 +833,17 @@ dwRegister: DWORD, pvReserved: *mut c_void, ); + pub fn SetErrorInfo( + dwReserved: ULONG, + perrinfo: *mut IErrorInfo, + ) -> HRESULT; + pub fn GetErrorInfo( + dwReserved: ULONG, + pperrinfo: *mut *mut IErrorInfo, + ) -> HRESULT; + pub fn CreateErrorInfo( + pperrinfo: *mut *mut ICreateErrorInfo, + ) -> HRESULT; pub fn OaBuildVersion() -> ULONG; pub fn OaEnablePerUserTLibRegistration(); } diff -Nru cargo-0.44.1/vendor/winapi/src/um/rtinfo.rs cargo-0.47.0/vendor/winapi/src/um/rtinfo.rs --- cargo-0.44.1/vendor/winapi/src/um/rtinfo.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/rtinfo.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,7 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::basetsd::DWORD_PTR; +pub const ALIGN_SIZE: DWORD_PTR = 0x00000008; diff -Nru cargo-0.44.1/vendor/winapi/src/um/shobjidl_core.rs cargo-0.47.0/vendor/winapi/src/um/shobjidl_core.rs --- cargo-0.44.1/vendor/winapi/src/um/shobjidl_core.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/shobjidl_core.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,11 +6,17 @@ use ctypes::{c_int, c_void}; use shared::guiddef::{REFGUID, REFIID}; use shared::minwindef::{BOOL, DWORD, UINT, ULONG, WORD}; -use shared::windef::{HICON, HWND, RECT}; +use shared::windef::{COLORREF, HICON, HWND, RECT}; use um::commctrl::HIMAGELIST; +use um::minwinbase::{WIN32_FIND_DATAA, WIN32_FIND_DATAW}; use um::objidl::IBindCtx; +use um::propkeydef::REFPROPERTYKEY; +use um::propsys::GETPROPERTYSTOREFLAGS; +use um::shtypes::{PCIDLIST_ABSOLUTE, PIDLIST_ABSOLUTE}; use um::unknwnbase::{IUnknown, IUnknownVtbl}; -use um::winnt::{HRESULT, LPCWSTR, LPWSTR, ULONGLONG, WCHAR}; +use um::winnt::{HRESULT, LPCSTR, LPCWSTR, LPSTR, LPWSTR, PCWSTR, ULONGLONG, WCHAR}; +DEFINE_GUID!{CLSID_DesktopWallpaper, + 0xc2cf3110, 0x460e, 0x4fc1, 0xb9, 0xd0, 0x8a, 0x1c, 0x0c, 0x9c, 0xc4, 0xbd} DEFINE_GUID!{CLSID_TaskbarList, 0x56fdf344, 0xfd6d, 0x11d0, 0x95, 0x8a, 0x00, 0x60, 0x97, 0xc9, 0xa0, 0x90} DEFINE_GUID!{CLSID_FileOpenDialog, @@ -78,6 +84,48 @@ piOrder: *mut c_int, ) -> HRESULT, }} +ENUM!{enum SIATTRIBFLAGS { + SIATTRIBFLAGS_AND = 0x1, + SIATTRIBFLAGS_OR = 0x2, + SIATTRIBFLAGS_APPCOMPAT = 0x3, + SIATTRIBFLAGS_MASK = 0x3, + SIATTRIBFLAGS_ALLITEMS = 0x4000, +}} +RIDL!{#[uuid(0xb63ea76d, 0x1f85, 0x456f, 0xa1, 0x9c, 0x48, 0x15, 0x9e, 0xfa, 0x85, 0x8b)] +interface IShellItemArray(IShellItemArrayVtbl): IUnknown(IUnknownVtbl) { + fn BindToHandler( + pbc: *mut IBindCtx, + bhid: REFGUID, + riid: REFIID, + ppvOut: *mut *mut c_void, + ) -> HRESULT, + fn GetPropertyStore( + flags: GETPROPERTYSTOREFLAGS, + riid: REFIID, + ppv: *mut *mut c_void, + ) -> HRESULT, + fn GetPropertyDescriptionList( + keyType: REFPROPERTYKEY, + riid: REFIID, + ppv: *mut *mut c_void, + ) -> HRESULT, + fn GetAttributes( + AttribFlags: SIATTRIBFLAGS, + sfgaoMask: SFGAOF, + psfgaoAttribs: *mut SFGAOF, + ) -> HRESULT, + fn GetCount( + pdwNumItems: *mut DWORD, + ) -> HRESULT, + fn GetItemAt( + dwIndex: DWORD, + ppsi: *mut *mut IShellItem, + ) -> HRESULT, + // TODO: Add 
IEnumShellItems + //fn EnumItems( + // ppenumShellItems: *mut *mut IEnumShellItems, + //) -> HRESULT, +}} //20869 RIDL!{#[uuid(0xb4db1657, 0x70d7, 0x485e, 0x8e, 0x3e, 0x6f, 0xcb, 0x5a, 0x5c, 0x18, 0x02)] interface IModalWindow(IModalWindowVtbl): IUnknown(IUnknownVtbl) { @@ -220,6 +268,220 @@ stpFlags: STPFLAG, ) -> HRESULT, }} +ENUM!{enum DESKTOP_SLIDESHOW_OPTIONS { + DSO_SHUFFLEIMAGES = 0x1, +}} +ENUM!{enum DESKTOP_SLIDESHOW_STATE { + DSS_ENABLED = 0x1, + DSS_SLIDESHOW = 0x2, + DSS_DISABLED_BY_REMOTE_SESSION = 0x4, +}} +ENUM!{enum DESKTOP_SLIDESHOW_DIRECTION { + DSD_FORWARD = 0, + DSD_BACKWARD = 1, +}} +ENUM!{enum DESKTOP_WALLPAPER_POSITION { + DWPOS_CENTER = 0, + DWPOS_TILE = 1, + DWPOS_STRETCH = 2, + DWPOS_FIT = 3, + DWPOS_FILL = 4, + DWPOS_SPAN = 5, +}} +RIDL!{#[uuid(0xb92b56a9, 0x8b55, 0x4e14, 0x9a, 0x89, 0x01, 0x99, 0xbb, 0xb6, 0xf9, 0x3b)] +interface IDesktopWallpaper(IDesktopWallpaperVtbl): IUnknown(IUnknownVtbl) { + fn SetWallpaper( + monitorID: LPCWSTR, + wallpaper: LPCWSTR, + ) -> HRESULT, + fn GetWallpaper( + monitorID: LPCWSTR, + wallpaper: *mut LPWSTR, + ) -> HRESULT, + fn GetMonitorDevicePathAt( + monitorIndex: UINT, + monitorID: *mut LPWSTR, + ) -> HRESULT, + fn GetMonitorDevicePathCount( + count: *mut UINT, + ) -> HRESULT, + fn GetMonitorRECT( + monitorID: LPCWSTR, + displayRect: *mut RECT, + ) -> HRESULT, + fn SetBackgroundColor( + color: COLORREF, + ) -> HRESULT, + fn GetBackgroundColor( + color: *mut COLORREF, + ) -> HRESULT, + fn SetPosition( + position: DESKTOP_WALLPAPER_POSITION, + ) -> HRESULT, + fn GetPosition( + position: *mut DESKTOP_WALLPAPER_POSITION, + ) -> HRESULT, + fn SetSlideshow( + items: *mut IShellItemArray, + ) -> HRESULT, + fn GetSlideshow( + items: *mut *mut IShellItemArray, + ) -> HRESULT, + fn SetSlideshowOptions( + options: DESKTOP_SLIDESHOW_OPTIONS, + slideshowTick: UINT, + ) -> HRESULT, + fn GetSlideshowOptions( + options: *mut DESKTOP_SLIDESHOW_OPTIONS, + slideshowTick: *mut UINT, + ) -> HRESULT, + fn AdvanceSlideshow( + monitorID: LPCWSTR, + direction: DESKTOP_SLIDESHOW_DIRECTION, + ) -> HRESULT, + fn GetStatus( + state: *mut DESKTOP_SLIDESHOW_STATE, + ) -> HRESULT, + fn Enable( + enable: BOOL, + ) -> HRESULT, +}} +RIDL!{#[uuid(0x000214ee, 0x0000, 0x0000, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46)] +interface IShellLinkA(IShellLinkAVtbl): IUnknown(IUnknownVtbl) { + fn GetPath( + pszFile: LPSTR, + cch: c_int, + pfd: *mut WIN32_FIND_DATAA, + fFlags: DWORD, + ) -> HRESULT, + fn GetIDList( + ppidl: *mut PIDLIST_ABSOLUTE, + ) -> HRESULT, + fn SetIDList( + pidl: PCIDLIST_ABSOLUTE, + ) -> HRESULT, + fn GetDescription( + pszName: LPSTR, + cch: c_int, + ) -> HRESULT, + fn SetDescription( + pszName: LPCSTR, + ) -> HRESULT, + fn GetWorkingDirectory( + pszDir: LPSTR, + cch: c_int, + ) -> HRESULT, + fn SetWorkingDirectory( + pszDir: LPCSTR, + ) -> HRESULT, + fn GetArguments( + pszArgs: LPSTR, + cch: c_int, + ) -> HRESULT, + fn SetArguments( + pszArgs: LPCSTR, + ) -> HRESULT, + fn GetHotkey( + pwHotkey: *mut WORD, + ) -> HRESULT, + fn SetHotkey( + wHotkey: WORD, + ) -> HRESULT, + fn GetShowCmd( + piShowCmd: *mut c_int, + ) -> HRESULT, + fn SetShowCmd( + iShowCmd: c_int, + ) -> HRESULT, + fn GetIconLocation( + pszIconPath: LPSTR, + cch: c_int, + piIcon: *mut c_int, + ) -> HRESULT, + fn SetIconLocation( + pszIconPath: LPCSTR, + iIcon: c_int, + ) -> HRESULT, + fn SetRelativePath( + pszPathRel: LPCSTR, + dwReserved: DWORD, + ) -> HRESULT, + fn Resolve( + hwnd: HWND, + fFlags: DWORD, + ) -> HRESULT, + fn SetPath( + pszFile: LPCSTR, + ) -> HRESULT, +}} 
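// --- Illustrative usage sketch (editor's note, not part of the vendored patch) ---
// The IDesktopWallpaper interface and the CLSID_DesktopWallpaper coclass added
// in this shobjidl_core.rs hunk are normally reached through CoCreateInstance.
// The helper name `current_wallpaper`, the literal CLSCTX_ALL value (0x17), and
// the null monitor ID (returns the wallpaper only if all monitors share one)
// are illustrative assumptions; the caller is assumed to have already
// initialized COM on this thread.
use std::ptr;
use winapi::ctypes::c_void;
use winapi::shared::winerror::SUCCEEDED;
use winapi::um::combaseapi::{CoCreateInstance, CoTaskMemFree};
use winapi::um::shobjidl_core::{CLSID_DesktopWallpaper, IDesktopWallpaper};
use winapi::um::winnt::LPWSTR;
use winapi::Interface;

unsafe fn current_wallpaper() -> Option<String> {
    let mut wallpaper: *mut IDesktopWallpaper = ptr::null_mut();
    let hr = CoCreateInstance(
        &CLSID_DesktopWallpaper,
        ptr::null_mut(),
        0x17, // CLSCTX_ALL
        &IDesktopWallpaper::uuidof(),
        &mut wallpaper as *mut *mut IDesktopWallpaper as *mut *mut c_void,
    );
    if !SUCCEEDED(hr) || wallpaper.is_null() {
        return None;
    }
    let mut path: LPWSTR = ptr::null_mut();
    // Null monitor ID: ask for the single wallpaper shared by all monitors.
    let hr = (*wallpaper).GetWallpaper(ptr::null(), &mut path);
    let result = if SUCCEEDED(hr) && !path.is_null() {
        // Copy the NUL-terminated wide string before freeing the COM allocation.
        let len = (0..).take_while(|&i| *path.offset(i) != 0).count();
        Some(String::from_utf16_lossy(std::slice::from_raw_parts(path, len)))
    } else {
        None
    };
    if !path.is_null() {
        CoTaskMemFree(path as *mut c_void);
    }
    (*wallpaper).Release();
    result
}
// --- end of editor's sketch ---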
+RIDL!{#[uuid(0x000214f9, 0x0000, 0x0000, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46)] +interface IShellLinkW(IShellLinkWVtbl): IUnknown(IUnknownVtbl) { + fn GetPath( + pszFile: LPWSTR, + cch: c_int, + pfd: *mut WIN32_FIND_DATAW, + fFlags: DWORD, + ) -> HRESULT, + fn GetIDList( + ppidl: *mut PIDLIST_ABSOLUTE, + ) -> HRESULT, + fn SetIDList( + pidl: PCIDLIST_ABSOLUTE, + ) -> HRESULT, + fn GetDescription( + pszName: LPWSTR, + cch: c_int, + ) -> HRESULT, + fn SetDescription( + pszName: LPCWSTR, + ) -> HRESULT, + fn GetWorkingDirectory( + pszDir: LPWSTR, + cch: c_int, + ) -> HRESULT, + fn SetWorkingDirectory( + pszDir: LPCWSTR, + ) -> HRESULT, + fn GetArguments( + pszArgs: LPWSTR, + cch: c_int, + ) -> HRESULT, + fn SetArguments( + pszArgs: LPCWSTR, + ) -> HRESULT, + fn GetHotkey( + pwHotkey: *mut WORD, + ) -> HRESULT, + fn SetHotkey( + wHotkey: WORD, + ) -> HRESULT, + fn GetShowCmd( + piShowCmd: *mut c_int, + ) -> HRESULT, + fn SetShowCmd( + iShowCmd: c_int, + ) -> HRESULT, + fn GetIconLocation( + pszIconPath: LPWSTR, + cch: c_int, + piIcon: *mut c_int, + ) -> HRESULT, + fn SetIconLocation( + pszIconPath: LPCWSTR, + iIcon: c_int, + ) -> HRESULT, + fn SetRelativePath( + pszPathRel: LPCWSTR, + dwReserved: DWORD, + ) -> HRESULT, + fn Resolve( + hwnd: HWND, + fFlags: DWORD, + ) -> HRESULT, + fn SetPath( + pszFile: LPCWSTR, + ) -> HRESULT, +}} RIDL!{#[uuid(0xc2cf3110, 0x460e, 0x4fc1, 0xb9, 0xd0, 0x8a, 0x1c, 0x0c, 0x9c, 0xc4, 0xbd)] class DesktopWallpaper;} RIDL!{#[uuid(0x00021400, 0x0000, 0x0000, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46)] @@ -304,3 +566,11 @@ class ApplicationActivationManager;} RIDL!{#[uuid(0x958a6fb5, 0xdcb2, 0x4faf, 0xaa, 0xfd, 0x7f, 0xb0, 0x54, 0xad, 0x1a, 0x3b)] class ApplicationDesignModeSettings;} +extern "system" { + pub fn SHCreateItemFromParsingName( + pszPath: PCWSTR, + pbc: *mut IBindCtx, + riid: REFIID, + ppv: *mut *mut c_void + ) -> HRESULT; +} diff -Nru cargo-0.44.1/vendor/winapi/src/um/shobjidl.rs cargo-0.47.0/vendor/winapi/src/um/shobjidl.rs --- cargo-0.44.1/vendor/winapi/src/um/shobjidl.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/shobjidl.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,60 +3,16 @@ // , at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. 
-use ctypes::c_void; -use shared::guiddef::{REFGUID, REFIID}; +use shared::guiddef::REFGUID; use shared::minwindef::{BOOL, DWORD, UINT}; use shared::windef::HWND; -use um::objidl::IBindCtx; -use um::propkeydef::REFPROPERTYKEY; -use um::propsys::{GETPROPERTYSTOREFLAGS, IPropertyDescriptionList, IPropertyStore}; -use um::shobjidl_core::{IModalWindow, IModalWindowVtbl, IShellItem, IShellItemFilter, SFGAOF}; +use um::propsys::{IPropertyDescriptionList, IPropertyStore}; +use um::shobjidl_core::{IModalWindow, IModalWindowVtbl, IShellItem, IShellItemFilter}; use um::shtypes::COMDLG_FILTERSPEC; use um::unknwnbase::{IUnknown, IUnknownVtbl}; use um::winnt::{HRESULT, LPCWSTR, LPWSTR, WCHAR}; pub type IFileOperationProgressSink = IUnknown; // TODO -ENUM!{enum SIATTRIBFLAGS { - SIATTRIBFLAGS_AND = 0x1, - SIATTRIBFLAGS_OR = 0x2, - SIATTRIBFLAGS_APPCOMPAT = 0x3, - SIATTRIBFLAGS_MASK = 0x3, - SIATTRIBFLAGS_ALLITEMS = 0x4000, -}} -RIDL!{#[uuid(0xb63ea76d, 0x1f85, 0x456f, 0xa1, 0x9c, 0x48, 0x15, 0x9e, 0xfa, 0x85, 0x8b)] -interface IShellItemArray(IShellItemArrayVtbl): IUnknown(IUnknownVtbl) { - fn BindToHandler( - pbc: *mut IBindCtx, - bhid: REFGUID, - riid: REFIID, - ppvOut: *mut *mut c_void, - ) -> HRESULT, - fn GetPropertyStore( - flags: GETPROPERTYSTOREFLAGS, - riid: REFIID, - ppv: *mut *mut c_void, - ) -> HRESULT, - fn GetPropertyDescriptionList( - keyType: REFPROPERTYKEY, - riid: REFIID, - ppv: *mut *mut c_void, - ) -> HRESULT, - fn GetAttributes( - AttribFlags: SIATTRIBFLAGS, - sfgaoMask: SFGAOF, - psfgaoAttribs: *mut SFGAOF, - ) -> HRESULT, - fn GetCount( - pdwNumItems: *mut DWORD, - ) -> HRESULT, - fn GetItemAt( - dwIndex: DWORD, - ppsi: *mut *mut IShellItem, - ) -> HRESULT, - // TODO: Add IEnumShellItems - //fn EnumItems( - // ppenumShellItems: *mut *mut IEnumShellItems, - //) -> HRESULT, -}} +pub use um::shobjidl_core::{IShellItemArray, SIATTRIBFLAGS}; // FIXME: Remove these in the next major release ENUM!{enum FDE_OVERWRITE_RESPONSE { FDEOR_DEFAULT = 0, FDEOR_ACCEPT = 1, diff -Nru cargo-0.44.1/vendor/winapi/src/um/softpub.rs cargo-0.47.0/vendor/winapi/src/um/softpub.rs --- cargo-0.44.1/vendor/winapi/src/um/softpub.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/softpub.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,3 @@ +//50 +DEFINE_GUID!{WINTRUST_ACTION_GENERIC_VERIFY_V2, + 0xaac56b, 0xcd44, 0x11d0, 0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee} diff -Nru cargo-0.44.1/vendor/winapi/src/um/tlhelp32.rs cargo-0.47.0/vendor/winapi/src/um/tlhelp32.rs --- cargo-0.44.1/vendor/winapi/src/um/tlhelp32.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/tlhelp32.rs 2020-10-01 21:38:28.000000000 +0000 @@ -20,7 +20,7 @@ pub const TH32CS_SNAPMODULE: DWORD = 0x00000008; pub const TH32CS_SNAPMODULE32: DWORD = 0x00000010; pub const TH32CS_SNAPALL: DWORD = - (TH32CS_SNAPHEAPLIST | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD | TH32CS_SNAPMODULE); + TH32CS_SNAPHEAPLIST | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD | TH32CS_SNAPMODULE; pub const TH32CS_INHERIT: DWORD = 0x80000000; STRUCT!{struct HEAPLIST32 { dwSize: SIZE_T, diff -Nru cargo-0.44.1/vendor/winapi/src/um/werapi.rs cargo-0.47.0/vendor/winapi/src/um/werapi.rs --- cargo-0.44.1/vendor/winapi/src/um/werapi.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/werapi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -6,6 +6,17 @@ //! 
Function prototypes for Windows Error Reporting (WER) use shared::minwindef::{BOOL, DWORD, PDWORD}; use um::winnt::{HANDLE, HRESULT, PCWSTR, PVOID}; +pub const WER_FAULT_REPORTING_FLAG_NOHEAP: DWORD = 1; +pub const WER_FAULT_REPORTING_FLAG_QUEUE: DWORD = 2; +pub const WER_FAULT_REPORTING_FLAG_DISABLE_THREAD_SUSPENSION: DWORD = 4; +pub const WER_FAULT_REPORTING_FLAG_QUEUE_UPLOAD: DWORD = 8; +pub const WER_FAULT_REPORTING_ALWAYS_SHOW_UI: DWORD = 16; +pub const WER_FAULT_REPORTING_NO_UI: DWORD = 32; +pub const WER_FAULT_REPORTING_FLAG_NO_HEAP_ON_QUEUE: DWORD = 64; +pub const WER_FAULT_REPORTING_DISABLE_SNAPSHOT_CRASH: DWORD = 128; +pub const WER_FAULT_REPORTING_DISABLE_SNAPSHOT_HANG: DWORD = 256; +pub const WER_FAULT_REPORTING_CRITICAL: DWORD = 512; +pub const WER_FAULT_REPORTING_DURABLE: DWORD = 1024; ENUM!{enum WER_REGISTER_FILE_TYPE { WerRegFileTypeUserDocument = 1, WerRegFileTypeOther = 2, diff -Nru cargo-0.44.1/vendor/winapi/src/um/winbase.rs cargo-0.47.0/vendor/winapi/src/um/winbase.rs --- cargo-0.44.1/vendor/winapi/src/um/winbase.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/winbase.rs 2020-10-01 21:38:28.000000000 +0000 @@ -26,7 +26,8 @@ LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE, LPSECURITY_ATTRIBUTES, PREASON_CONTEXT, }; use um::processthreadsapi::{ - LPPROC_THREAD_ATTRIBUTE_LIST, LPSTARTUPINFOA, STARTUPINFOA, STARTUPINFOW, + LPPROCESS_INFORMATION, LPPROC_THREAD_ATTRIBUTE_LIST, LPSTARTUPINFOA, LPSTARTUPINFOW, + STARTUPINFOA, STARTUPINFOW, }; use um::winnt::{ BOOLEAN, CHAR, DWORDLONG, EXECUTION_STATE, FILE_ID_128, HANDLE, HRESULT, INT, LANGID, @@ -2306,9 +2307,37 @@ pdwProfileLength: LPDWORD, pQuotaLimits: PQUOTA_LIMITS, ) -> BOOL; - // pub fn CreateProcessWithLogonW(); - // pub fn CreateProcessWithTokenW(); - // pub fn IsTokenUntrusted(); +} +pub const LOGON_WITH_PROFILE: DWORD = 0x00000001; +pub const LOGON_NETCREDENTIALS_ONLY: DWORD = 0x00000002; +extern "system" { + pub fn CreateProcessWithLogonW( + lpUsername: LPCWSTR, + lpDomain: LPCWSTR, + lpPassword: LPCWSTR, + dwLogonFlags: DWORD, + lpApplicationName: LPCWSTR, + lpCommandLine: LPWSTR, + dwCreationFlags: DWORD, + lpEnvironment: LPVOID, + lpCurrentDirectory: LPCWSTR, + lpStartupInfo: LPSTARTUPINFOW, + lpProcessInformation: LPPROCESS_INFORMATION, + ) -> BOOL; + pub fn CreateProcessWithTokenW( + hToken: HANDLE, + dwLogonFlags: DWORD, + lpApplicationName: LPCWSTR, + lpCommandLine: LPWSTR, + dwCreationFlags: DWORD, + lpEnvironment: LPVOID, + lpCurrentDirectory: LPCWSTR, + lpStartupInfo: LPSTARTUPINFOW, + lpProcessInformation: LPPROCESS_INFORMATION, + ) -> BOOL; + pub fn IsTokenUntrusted( + TokenHandle: HANDLE, + ) -> BOOL; pub fn RegisterWaitForSingleObject( phNewWaitObject: PHANDLE, hObject: HANDLE, diff -Nru cargo-0.44.1/vendor/winapi/src/um/wingdi.rs cargo-0.47.0/vendor/winapi/src/um/wingdi.rs --- cargo-0.44.1/vendor/winapi/src/um/wingdi.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wingdi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -84,7 +84,7 @@ pub const LAYOUT_RTL: DWORD = 0x00000001; pub const LAYOUT_BTT: DWORD = 0x00000002; pub const LAYOUT_VBH: DWORD = 0x00000004; -pub const LAYOUT_ORIENTATIONMASK: DWORD = (LAYOUT_RTL | LAYOUT_BTT | LAYOUT_VBH); +pub const LAYOUT_ORIENTATIONMASK: DWORD = LAYOUT_RTL | LAYOUT_BTT | LAYOUT_VBH; pub const LAYOUT_BITMAPORIENTATIONPRESERVED: DWORD = 0x00000008; pub const TA_NOUPDATECP: UINT = 0; pub const TA_UPDATECP: UINT = 1; @@ -115,7 +115,7 @@ pub const DCB_RESET: UINT = 0x0001; pub const DCB_ACCUMULATE: UINT = 0x0002; pub 
const DCB_DIRTY: UINT = DCB_ACCUMULATE; -pub const DCB_SET: UINT = (DCB_RESET | DCB_ACCUMULATE); +pub const DCB_SET: UINT = DCB_RESET | DCB_ACCUMULATE; pub const DCB_ENABLE: UINT = 0x0004; pub const DCB_DISABLE: UINT = 0x0008; pub const META_SETBKCOLOR: WORD = 0x0201; diff -Nru cargo-0.44.1/vendor/winapi/src/um/winnt.rs cargo-0.47.0/vendor/winapi/src/um/winnt.rs --- cargo-0.44.1/vendor/winapi/src/um/winnt.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/winnt.rs 2020-10-01 21:38:28.000000000 +0000 @@ -148,25 +148,9 @@ pub type PLONGLONG = *mut LONGLONG; pub type PULONGLONG = *mut ULONGLONG; pub type USN = LONGLONG; -STRUCT!{struct LARGE_INTEGER_u { - LowPart: DWORD, - HighPart: LONG, -}} -UNION!{union LARGE_INTEGER { - [u64; 1], - QuadPart QuadPart_mut: LONGLONG, - u u_mut: LARGE_INTEGER_u, -}} +pub use shared::ntdef::LARGE_INTEGER; pub type PLARGE_INTEGER = *mut LARGE_INTEGER; -STRUCT!{struct ULARGE_INTEGER_u { - LowPart: DWORD, - HighPart: LONG, -}} -UNION!{union ULARGE_INTEGER { - [u64; 1], - QuadPart QuadPart_mut: ULONGLONG, - u u_mut: ULARGE_INTEGER_u, -}} +pub use shared::ntdef::ULARGE_INTEGER; pub type PULARGE_INTEGER = *mut ULARGE_INTEGER; pub type RTL_REFERENCE_COUNT = LONG_PTR; pub type PRTL_REFERENCE_COUNT = *mut LONG_PTR; @@ -4052,7 +4036,7 @@ pub const SEC_NOCACHE: DWORD = 0x10000000; pub const SEC_WRITECOMBINE: DWORD = 0x40000000; pub const SEC_LARGE_PAGES: DWORD = 0x80000000; -pub const SEC_IMAGE_NO_EXECUTE: DWORD = (SEC_IMAGE | SEC_NOCACHE); +pub const SEC_IMAGE_NO_EXECUTE: DWORD = SEC_IMAGE | SEC_NOCACHE; pub const MEM_IMAGE: DWORD = SEC_IMAGE; pub const WRITE_WATCH_FLAG_RESET: DWORD = 0x01; pub const MEM_UNMAP_WITH_TRANSIENT_BOOST: DWORD = 0x01; @@ -6972,7 +6956,7 @@ EndAddress: DWORD, u: IMAGE_RUNTIME_FUNCTION_ENTRY_u, }} -type _PIMAGE_RUNTIME_FUNCTION_ENTRY = *mut _IMAGE_RUNTIME_FUNCTION_ENTRY; +pub type _PIMAGE_RUNTIME_FUNCTION_ENTRY = *mut _IMAGE_RUNTIME_FUNCTION_ENTRY; pub type IMAGE_IA64_RUNTIME_FUNCTION_ENTRY = _IMAGE_RUNTIME_FUNCTION_ENTRY; pub type PIMAGE_IA64_RUNTIME_FUNCTION_ENTRY = _PIMAGE_RUNTIME_FUNCTION_ENTRY; #[cfg(target_arch = "aarch64")] diff -Nru cargo-0.44.1/vendor/winapi/src/um/winreg.rs cargo-0.47.0/vendor/winapi/src/um/winreg.rs --- cargo-0.44.1/vendor/winapi/src/um/winreg.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/winreg.rs 2020-10-01 21:38:28.000000000 +0000 @@ -8,7 +8,16 @@ BOOL, BYTE, DWORD, HKEY, LPBYTE, LPCVOID, LPDWORD, PFILETIME, PHKEY, ULONG }; use um::minwinbase::LPSECURITY_ATTRIBUTES; -use um::winnt::{ACCESS_MASK, HANDLE, LONG, LPCSTR, LPCWSTR, LPSTR, LPWSTR, PVOID}; +use um::reason::{ + SHTDN_REASON_FLAG_PLANNED, SHTDN_REASON_LEGACY_API, SHTDN_REASON_MAJOR_HARDWARE, + SHTDN_REASON_MAJOR_OTHER, SHTDN_REASON_MAJOR_SOFTWARE, SHTDN_REASON_MAJOR_SYSTEM, + SHTDN_REASON_MINOR_HUNG, SHTDN_REASON_MINOR_INSTALLATION, SHTDN_REASON_MINOR_OTHER, + SHTDN_REASON_MINOR_RECONFIG, SHTDN_REASON_MINOR_UNSTABLE, SHTDN_REASON_UNKNOWN, +}; +use um::winnt::{ + ACCESS_MASK, BOOLEAN, HANDLE, LONG, LPCSTR, LPCWSTR, LPSTR, LPWSTR, PBOOLEAN, PLONG, + PSECURITY_DESCRIPTOR, PVOID, SECURITY_INFORMATION, +}; pub type LSTATUS = LONG; pub const RRF_RT_REG_NONE: DWORD = 0x00000001; pub const RRF_RT_REG_SZ: DWORD = 0x00000002; @@ -27,16 +36,16 @@ pub const RRF_ZEROONFAILURE: DWORD = 0x20000000; pub const REG_PROCESS_APPKEY: DWORD = 0x00000001; pub type REGSAM = ACCESS_MASK; -pub const HKEY_CLASSES_ROOT: HKEY = 0x80000000i32 as isize as HKEY; -pub const HKEY_CURRENT_USER: HKEY = 0x80000001i32 as isize as HKEY; 
-pub const HKEY_LOCAL_MACHINE: HKEY = 0x80000002i32 as isize as HKEY; -pub const HKEY_USERS: HKEY = 0x80000003i32 as isize as HKEY; -pub const HKEY_PERFORMANCE_DATA: HKEY = 0x80000004i32 as isize as HKEY; -pub const HKEY_PERFORMANCE_TEXT: HKEY = 0x80000050i32 as isize as HKEY; -pub const HKEY_PERFORMANCE_NLSTEXT: HKEY = 0x80000060i32 as isize as HKEY; -pub const HKEY_CURRENT_CONFIG: HKEY = 0x80000005i32 as isize as HKEY; -pub const HKEY_DYN_DATA: HKEY = 0x80000006i32 as isize as HKEY; -pub const HKEY_CURRENT_USER_LOCAL_SETTINGS: HKEY = 0x80000007i32 as isize as HKEY; +pub const HKEY_CLASSES_ROOT: HKEY = 0x80000000i32 as usize as HKEY; +pub const HKEY_CURRENT_USER: HKEY = 0x80000001i32 as usize as HKEY; +pub const HKEY_LOCAL_MACHINE: HKEY = 0x80000002i32 as usize as HKEY; +pub const HKEY_USERS: HKEY = 0x80000003i32 as usize as HKEY; +pub const HKEY_PERFORMANCE_DATA: HKEY = 0x80000004i32 as usize as HKEY; +pub const HKEY_PERFORMANCE_TEXT: HKEY = 0x80000050i32 as usize as HKEY; +pub const HKEY_PERFORMANCE_NLSTEXT: HKEY = 0x80000060i32 as usize as HKEY; +pub const HKEY_CURRENT_CONFIG: HKEY = 0x80000005i32 as usize as HKEY; +pub const HKEY_DYN_DATA: HKEY = 0x80000006i32 as usize as HKEY; +pub const HKEY_CURRENT_USER_LOCAL_SETTINGS: HKEY = 0x80000007i32 as usize as HKEY; // PROVIDER_KEEPS_VALUE_LENGTH // val_context // PVALUEA @@ -214,8 +223,18 @@ hKey: HKEY, lpValueName: LPCWSTR, ) -> LSTATUS; - // pub fn RegEnumKeyA(); - // pub fn RegEnumKeyW(); + pub fn RegEnumKeyA( + hKey: HKEY, + dwIndex: DWORD, + lpName: LPSTR, + cchName: DWORD, + ) -> LSTATUS; + pub fn RegEnumKeyW( + hKey: HKEY, + dwIndex: DWORD, + lpName: LPWSTR, + cchName: DWORD, + ) -> LSTATUS; pub fn RegEnumKeyExA( hKey: HKEY, dwIndex: DWORD, @@ -259,9 +278,22 @@ pub fn RegFlushKey( hKey: HKEY, ) -> LSTATUS; - // pub fn RegGetKeySecurity(); - // pub fn RegLoadKeyA(); - // pub fn RegLoadKeyW(); + pub fn RegGetKeySecurity( + hKey: HKEY, + SecurityInformation: SECURITY_INFORMATION, + pSecurityDescriptor: PSECURITY_DESCRIPTOR, + lpcbSecurityDescriptor: LPDWORD, + ) -> LSTATUS; + pub fn RegLoadKeyA( + hKey: HKEY, + lpSubKey: LPCSTR, + lpFile: LPCSTR, + ) -> LSTATUS; + pub fn RegLoadKeyW( + hKey: HKEY, + lpSubKey: LPCWSTR, + lpFile: LPCWSTR, + ) -> LSTATUS; pub fn RegNotifyChangeKeyValue( hKey: HKEY, bWatchSubtree: BOOL, @@ -269,8 +301,16 @@ hEvent: HANDLE, fAsynchronous: BOOL, ) -> LSTATUS; - // pub fn RegOpenKeyA(); - // pub fn RegOpenKeyW(); + pub fn RegOpenKeyA( + hKey: HKEY, + lpSubKey: LPCSTR, + phkResult: PHKEY, + ) -> LSTATUS; + pub fn RegOpenKeyW( + hKey: HKEY, + lpSubKey: LPCWSTR, + phkResult: PHKEY, + ) -> LSTATUS; pub fn RegOpenKeyExA( hKey: HKEY, lpSubKey: LPCSTR, @@ -331,8 +371,18 @@ lpcbSecurityDescriptor: LPDWORD, lpftLastWriteTime: PFILETIME, ) -> LSTATUS; - // pub fn RegQueryValueA(); - // pub fn RegQueryValueW(); + pub fn RegQueryValueA( + hKey: HKEY, + lpSubKey: LPCSTR, + lpData: LPSTR, + lpcbData: PLONG, + ) -> LSTATUS; + pub fn RegQueryValueW( + hKey: HKEY, + lpSubKey: LPCWSTR, + lpData: LPWSTR, + lpcbData: PLONG, + ) -> LSTATUS; pub fn RegQueryMultipleValuesA( hKey: HKEY, val_list: PVALENTA, @@ -363,16 +413,62 @@ lpData: LPBYTE, lpcbData: LPDWORD, ) -> LSTATUS; - // pub fn RegReplaceKeyA(); - // pub fn RegReplaceKeyW(); - // pub fn RegRestoreKeyA(); - // pub fn RegRestoreKeyW(); - // pub fn RegRenameKey(); - // pub fn RegSaveKeyA(); - // pub fn RegSaveKeyW(); - // pub fn RegSetKeySecurity(); - // pub fn RegSetValueA(); - // pub fn RegSetValueW(); + pub fn RegReplaceKeyA( + hKey: HKEY, + lpSubKey: LPCSTR, + lpNewFile: 
LPCSTR, + lpOldFile: LPCSTR, + ) -> LSTATUS; + pub fn RegReplaceKeyW( + hKey: HKEY, + lpSubKey: LPCWSTR, + lpNewFile: LPCWSTR, + lpOldFile: LPCWSTR, + ) -> LSTATUS; + pub fn RegRestoreKeyA( + hKey: HKEY, + lpFile: LPCSTR, + dwFlags: DWORD, + ) -> LSTATUS; + pub fn RegRestoreKeyW( + hKey: HKEY, + lpFile: LPCWSTR, + dwFlags: DWORD, + ) -> LSTATUS; + pub fn RegRenameKey( + hKey: HKEY, + lpSubKeyName: LPCWSTR, + lpNewKeyName: LPCWSTR, + ) -> LSTATUS; + pub fn RegSaveKeyA( + hKey: HKEY, + lpFile: LPCSTR, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + ) -> LSTATUS; + pub fn RegSaveKeyW( + hKey: HKEY, + lpFile: LPCWSTR, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + ) -> LSTATUS; + pub fn RegSetKeySecurity( + hKey: HKEY, + SecurityInformation: SECURITY_INFORMATION, + pSecurityDescriptor: PSECURITY_DESCRIPTOR, + ) -> LSTATUS; + pub fn RegSetValueA( + hKey: HKEY, + lpSubKey: LPCSTR, + dwType: DWORD, + lpData: LPCSTR, + cbData: DWORD, + ) -> LSTATUS; + pub fn RegSetValueW( + hKey: HKEY, + lpSubKey: LPCWSTR, + dwType: DWORD, + lpData: LPCWSTR, + cbData: DWORD, + ) -> LSTATUS; pub fn RegSetValueExA( hKey: HKEY, lpValueName: LPCSTR, @@ -389,8 +485,14 @@ lpData: *const BYTE, cbData: DWORD, ) -> LSTATUS; - // pub fn RegUnLoadKeyA(); - // pub fn RegUnLoadKeyW(); + pub fn RegUnLoadKeyA( + hKey: HKEY, + lpSubKey: LPCSTR, + ) -> LSTATUS; + pub fn RegUnLoadKeyW( + hKey: HKEY, + lpSubKey: LPCWSTR, + ) -> LSTATUS; pub fn RegDeleteKeyValueA( hKey: HKEY, lpSubKey: LPCSTR, @@ -453,7 +555,15 @@ lpSubKey: LPCWSTR, hKeyDest: HKEY, ) -> LSTATUS; - // pub fn RegLoadMUIStringA(); + pub fn RegLoadMUIStringA( + hKey: HKEY, + pszValue: LPCSTR, + pszOutBuf: LPSTR, + cbOutBuf: DWORD, + pcbData: LPDWORD, + Flags: DWORD, + pszDirectory: LPCSTR, + ) -> LSTATUS; pub fn RegLoadMUIStringW( hKey: HKEY, pszValue: LPCWSTR, @@ -463,10 +573,34 @@ Flags: DWORD, pszDirectory: LPCWSTR, ) -> LSTATUS; - // pub fn RegLoadAppKeyA(); - // pub fn RegLoadAppKeyW(); - // pub fn InitiateSystemShutdownA(); - // pub fn InitiateSystemShutdownW(); + pub fn RegLoadAppKeyA( + lpFile: LPCSTR, + phkResult: PHKEY, + samDesired: REGSAM, + dwOptions: DWORD, + Reserved: DWORD, + ) -> LSTATUS; + pub fn RegLoadAppKeyW( + lpFile: LPCWSTR, + phkResult: PHKEY, + samDesired: REGSAM, + dwOptions: DWORD, + Reserved: DWORD, + ) -> LSTATUS; + pub fn InitiateSystemShutdownA( + lpMachineName: LPSTR, + lpMessage: LPSTR, + dwTimeout: DWORD, + bForceAppsClosed: BOOL, + bRebootAfterShutdown: BOOL, + ) -> BOOL; + pub fn InitiateSystemShutdownW( + lpMachineName: LPWSTR, + lpMessage: LPWSTR, + dwTimeout: DWORD, + bForceAppsClosed: BOOL, + bRebootAfterShutdown: BOOL, + ) -> BOOL; pub fn AbortSystemShutdownA( lpMachineName: LPSTR, ) -> BOOL; @@ -474,17 +608,77 @@ lpMachineName: LPWSTR, ) -> BOOL; } -// REASON_* -// MAX_SHUTDOWN_TIMEOUT +pub const REASON_SWINSTALL: DWORD = SHTDN_REASON_MAJOR_SOFTWARE | SHTDN_REASON_MINOR_INSTALLATION; +pub const REASON_HWINSTALL: DWORD = SHTDN_REASON_MAJOR_HARDWARE | SHTDN_REASON_MINOR_INSTALLATION; +pub const REASON_SERVICEHANG: DWORD = SHTDN_REASON_MAJOR_SOFTWARE | SHTDN_REASON_MINOR_HUNG; +pub const REASON_UNSTABLE: DWORD = SHTDN_REASON_MAJOR_SYSTEM | SHTDN_REASON_MINOR_UNSTABLE; +pub const REASON_SWHWRECONF: DWORD = SHTDN_REASON_MAJOR_SOFTWARE | SHTDN_REASON_MINOR_RECONFIG; +pub const REASON_OTHER: DWORD = SHTDN_REASON_MAJOR_OTHER | SHTDN_REASON_MINOR_OTHER; +pub const REASON_UNKNOWN: DWORD = SHTDN_REASON_UNKNOWN; +pub const REASON_LEGACY_API: DWORD = SHTDN_REASON_LEGACY_API; +pub const REASON_PLANNED_FLAG: DWORD = 
SHTDN_REASON_FLAG_PLANNED; +pub const MAX_SHUTDOWN_TIMEOUT: DWORD = 10 * 365 * 24 * 60 * 60; extern "system" { - // pub fn InitiateSystemShutdownExA(); - // pub fn InitiateSystemShutdownExW(); + pub fn InitiateSystemShutdownExA( + lpMachineName: LPSTR, + lpMessage: LPSTR, + dwTimeout: DWORD, + bForceAppsClosed: BOOL, + bRebootAfterShutdown: BOOL, + dwReason: DWORD, + ) -> BOOL; + pub fn InitiateSystemShutdownExW( + lpMachineName: LPWSTR, + lpMessage: LPWSTR, + dwTimeout: DWORD, + bForceAppsClosed: BOOL, + bRebootAfterShutdown: BOOL, + dwReason: DWORD, + ) -> BOOL; } -// SHUTDOWN_* +pub const SHUTDOWN_FORCE_OTHERS: DWORD = 0x00000001; +pub const SHUTDOWN_FORCE_SELF: DWORD = 0x00000002; +pub const SHUTDOWN_RESTART: DWORD = 0x00000004; +pub const SHUTDOWN_POWEROFF: DWORD = 0x00000008; +pub const SHUTDOWN_NOREBOOT: DWORD = 0x00000010; +pub const SHUTDOWN_GRACE_OVERRIDE: DWORD = 0x00000020; +pub const SHUTDOWN_INSTALL_UPDATES: DWORD = 0x00000040; +pub const SHUTDOWN_RESTARTAPPS: DWORD = 0x00000080; +pub const SHUTDOWN_SKIP_SVC_PRESHUTDOWN: DWORD = 0x00000100; +pub const SHUTDOWN_HYBRID: DWORD = 0x00000200; +pub const SHUTDOWN_RESTART_BOOTOPTIONS: DWORD = 0x00000400; +pub const SHUTDOWN_SOFT_REBOOT: DWORD = 0x00000800; +pub const SHUTDOWN_MOBILE_UI: DWORD = 0x00001000; +pub const SHUTDOWN_ARSO: DWORD = 0x00002000; extern "system" { - // pub fn InitiateShutdownA(); - // pub fn InitiateShutdownW(); - // pub fn CheckForHiberboot(); - // pub fn RegSaveKeyExA(); - // pub fn RegSaveKeyExW(); + pub fn InitiateShutdownA( + lpMachineName: LPSTR, + lpMessage: LPSTR, + dwGracePeriod: DWORD, + dwShutdownFlags: DWORD, + dwReason: DWORD, + ) -> DWORD; + pub fn InitiateShutdownW( + lpMachineName: LPWSTR, + lpMessage: LPWSTR, + dwGracePeriod: DWORD, + dwShutdownFlags: DWORD, + dwReason: DWORD, + ) -> DWORD; + pub fn CheckForHiberboot( + pHiberboot: PBOOLEAN, + bClearFlag: BOOLEAN, + ) -> DWORD; + pub fn RegSaveKeyExA( + hKey: HKEY, + lpFile: LPCSTR, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + Flags: DWORD, + ) -> LSTATUS; + pub fn RegSaveKeyExW( + hKey: HKEY, + lpFile: LPCWSTR, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + Flags: DWORD, + ) -> LSTATUS; } diff -Nru cargo-0.44.1/vendor/winapi/src/um/winspool.rs cargo-0.47.0/vendor/winapi/src/um/winspool.rs --- cargo-0.44.1/vendor/winapi/src/um/winspool.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/winspool.rs 2020-10-01 21:38:28.000000000 +0000 @@ -160,7 +160,7 @@ pub type PPRINTER_INFO_9A = *mut PRINTER_INFO_9A; pub type LPPRINTER_INFO_9A = *mut PRINTER_INFO_9A; STRUCT!{struct PRINTER_INFO_9W { - pDevMode: LPDEVMODEA, + pDevMode: LPDEVMODEW, }} pub type PPRINTER_INFO_9W = *mut PRINTER_INFO_9W; pub type LPPRINTER_INFO_9W = *mut PRINTER_INFO_9W; diff -Nru cargo-0.44.1/vendor/winapi/src/um/winsvc.rs cargo-0.47.0/vendor/winapi/src/um/winsvc.rs --- cargo-0.44.1/vendor/winapi/src/um/winsvc.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/winsvc.rs 2020-10-01 21:38:28.000000000 +0000 @@ -185,6 +185,28 @@ 0x1f81d131, 0x3fac, 0x4537, 0x9e, 0x0c, 0x7e, 0x7b, 0x0c, 0x2f, 0x4b, 0x55} DEFINE_GUID!{CUSTOM_SYSTEM_STATE_CHANGE_EVENT_GUID, 0x2d7a2816, 0x0c5e, 0x45fc, 0x9c, 0xe7, 0x57, 0x0e, 0x5e, 0xcd, 0xe9, 0xc9} +ENUM!{enum SC_ACTION_TYPE { + SC_ACTION_NONE = 0, + SC_ACTION_RESTART = 1, + SC_ACTION_REBOOT = 2, + SC_ACTION_RUN_COMMAND = 3, +}} +STRUCT!{struct SC_ACTION { + Type: SC_ACTION_TYPE, + Delay: DWORD, +}} +pub type LPSC_ACTION = *mut SC_ACTION; +STRUCT!{struct SERVICE_FAILURE_ACTIONSW { + dwResetPeriod: 
DWORD, + lpRebootMsg: LPWSTR, + lpCommand: LPWSTR, + cActions: DWORD, + lpsaActions: LPSC_ACTION, +}} +pub type LPSERVICE_FAILURE_ACTIONSW = *mut SERVICE_FAILURE_ACTIONSW; +STRUCT!{struct SERVICE_FAILURE_ACTIONS_FLAG { + fFailureActionsOnNonCrashFailures: BOOL, +}} DECLARE_HANDLE!{SC_HANDLE, SC_HANDLE__} pub type LPSC_HANDLE = *mut SC_HANDLE; DECLARE_HANDLE!{SERVICE_STATUS_HANDLE, SERVICE_STATUS_HANDLE__} @@ -277,6 +299,14 @@ lpDisplayName: LPWSTR, }} pub type LPQUERY_SERVICE_CONFIGW = *mut QUERY_SERVICE_CONFIGW; +STRUCT!{struct SERVICE_DESCRIPTIONA { + lpDescription: LPSTR, +}} +pub type LPSERVICE_DESCRIPTIONA = *mut SERVICE_DESCRIPTIONA; +STRUCT!{struct SERVICE_DESCRIPTIONW { + lpDescription: LPWSTR, +}} +pub type LPSERVICE_DESCRIPTIONW = *mut SERVICE_DESCRIPTIONW; FN!{stdcall LPSERVICE_MAIN_FUNCTIONW( dwNumServicesArgs: DWORD, lpServiceArgVectors: *mut LPWSTR, diff -Nru cargo-0.44.1/vendor/winapi/src/um/wintrust.rs cargo-0.47.0/vendor/winapi/src/um/wintrust.rs --- cargo-0.44.1/vendor/winapi/src/um/wintrust.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wintrust.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,123 @@ +use shared::guiddef::GUID; +use shared::minwindef::{BYTE, DWORD, LPVOID}; +use shared::ntdef::{HANDLE, LONG, LPCWSTR, LPWSTR, WCHAR}; +use shared::windef::HWND; +use um::wincrypt::{CRYPT_DATA_BLOB, PCERT_STRONG_SIGN_PARA}; +//90 +pub const WTD_UI_ALL: DWORD = 1; +pub const WTD_UI_NONE: DWORD = 2; +pub const WTD_UI_NOBAD: DWORD = 3; +pub const WTD_UI_NOGOOD: DWORD = 4; +pub const WTD_REVOKE_NONE: DWORD = 0x00000000; +pub const WTD_REVOKE_WHOLECHAIN: DWORD = 0x00000001; +pub const WTD_CHOICE_FILE: DWORD = 1; +pub const WTD_CHOICE_CATALOG: DWORD = 2; +pub const WTD_CHOICE_BLOB: DWORD = 3; +pub const WTD_CHOICE_SIGNER: DWORD = 4; +pub const WTD_CHOICE_CERT: DWORD = 5; +pub const WTD_STATEACTION_IGNORE: DWORD = 0x00000000; +pub const WTD_STATEACTION_VERIFY: DWORD = 0x00000001; +pub const WTD_STATEACTION_CLOSE: DWORD = 0x00000002; +pub const WTD_STATEACTION_AUTO_CACHE: DWORD = 0x00000003; +pub const WTD_STATEACTION_AUTO_CACHE_FLUSH: DWORD = 0x00000004; +pub const WTD_PROV_FLAGS_MASK: DWORD = 0x0000FFFF; +pub const WTD_USE_IE4_TRUST_FLAG: DWORD = 0x00000001; +pub const WTD_NO_IE4_CHAIN_FLAG: DWORD = 0x00000002; +pub const WTD_NO_POLICY_USAGE_FLAG: DWORD = 0x00000004; +pub const WTD_REVOCATION_CHECK_NONE: DWORD = 0x00000010; +pub const WTD_REVOCATION_CHECK_END_CERT: DWORD = 0x00000020; +pub const WTD_REVOCATION_CHECK_CHAIN: DWORD = 0x00000040; +pub const WTD_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT: DWORD = 0x00000080; +pub const WTD_SAFER_FLAG: DWORD = 0x00000100; +pub const WTD_HASH_ONLY_FLAG: DWORD = 0x00000200; +pub const WTD_USE_DEFAULT_OSVER_CHECK: DWORD = 0x00000400; +pub const WTD_LIFETIME_SIGNING_FLAG: DWORD = 0x00000800; +pub const WTD_CACHE_ONLY_URL_RETRIEVAL: DWORD = 0x00001000; +pub const WTD_DISABLE_MD2_MD4: DWORD = 0x00002000; +pub const WTD_MOTW: DWORD = 0x00004000; +pub const WTD_CODE_INTEGRITY_DRIVER_MODE: DWORD = 0x00008000; +pub const WTD_UICONTEXT_EXECUTE: DWORD = 0; +pub const WTD_UICONTEXT_INSTALL: DWORD = 1; +STRUCT!{struct WINTRUST_DATA { + cbStruct: DWORD, + pPolicyCallbackData: LPVOID, + pSIPClientData: LPVOID, + dwUIChoice: DWORD, + fdwRevocationChecks: DWORD, + dwUnionChoice: DWORD, + u: WINTRUST_DATA_u, + dwStateAction: DWORD, + hWVTStateData: HANDLE, + pwszURLReference: *mut WCHAR, + dwProvFlags: DWORD, + dwUIContext: DWORD, + pSignatureSettings: *mut WINTRUST_SIGNATURE_SETTINGS, +}} +UNION!{union WINTRUST_DATA_u { + [usize; 
1], + pFile pFile_mut: *mut WINTRUST_FILE_INFO, + // pCatalog pCatalog_mut: *mut WINTRUST_CATALOG_INFO, + // pBlob pBlob_mut: *mut WINTRUST_BLOB_INFO, + // pSgnr pSgnr_mut: *mut WINTRUST_SGNR_INFO, + // pCert pCert_mut: *mut WINTRUST_CERT_INFO, +}} +pub type PWINTRUST_DATA = *mut WINTRUST_DATA; +STRUCT!{struct WINTRUST_SIGNATURE_SETTINGS { + cbStruct: DWORD, + dwIndex: DWORD, + dwFlags: DWORD, + cSecondarySigs: DWORD, + dwVerifiedSigIndex: DWORD, + pCryptoPolicy: PCERT_STRONG_SIGN_PARA, +}} +pub type PWINTRUST_SIGNATURE_SETTINGS = *mut WINTRUST_SIGNATURE_SETTINGS; +//217 +STRUCT! {struct WINTRUST_FILE_INFO { + cbStruct: DWORD, + pcwszFilePath: LPCWSTR, + hFile: HANDLE, + pgKnownSubject: *const GUID, +}} +//414 +extern "system" { + pub fn WinVerifyTrust(hwnd: HWND, pgActionID: *mut GUID, pWVTData: LPVOID) -> LONG; +} +//1246 +pub const SPC_UUID_LENGTH: usize = 16; +pub type SPC_UUID = [BYTE; SPC_UUID_LENGTH]; +//SpcSerializedObjectAttributesClassId +STRUCT!{struct SPC_SERIALIZED_OBJECT { + ClassId: SPC_UUID, + SerializedData: CRYPT_DATA_BLOB, +}} +pub type PSPC_SERIALIZED_OBJECT = *mut SPC_SERIALIZED_OBJECT; +STRUCT!{struct SPC_SIGINFO { + dwSipVersion: DWORD, + gSIPGuid: GUID, + dwReserved1: DWORD, + dwReserved2: DWORD, + dwReserved3: DWORD, + dwReserved4: DWORD, + dwReserved5: DWORD, +}} +pub type PSPC_SIGINFO = *mut SPC_SIGINFO; +pub const SPC_URL_LINK_CHOICE: DWORD = 1; +pub const SPC_MONIKER_LINK_CHOICE: DWORD = 2; +pub const SPC_FILE_LINK_CHOICE: DWORD = 3; +STRUCT!{struct SPC_LINK { + dwLinkChoice: DWORD, + u: SPC_LINK_u, +}} +UNION!{union SPC_LINK_u { + [u32; 6] [u64; 4], + pwszUrl pwszUrl_mut: LPWSTR, + Moniker Moniker_mut: SPC_SERIALIZED_OBJECT, + pwszFile pwszFile_mut: LPWSTR, +}} +pub type PSPC_LINK = *mut SPC_LINK; +//1337 +STRUCT!{struct SPC_SP_OPUS_INFO { + pwszProgramName: LPCWSTR, + pMoreInfo: *mut SPC_LINK, + pPublisherInfo: *mut SPC_LINK, +}} diff -Nru cargo-0.44.1/vendor/winapi/src/um/winuser.rs cargo-0.47.0/vendor/winapi/src/um/winuser.rs --- cargo-0.44.1/vendor/winapi/src/um/winuser.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/winuser.rs 2020-10-01 21:38:28.000000000 +0000 @@ -1432,8 +1432,8 @@ pub const WMSZ_BOTTOM: UINT = 6; pub const WMSZ_BOTTOMLEFT: UINT = 7; pub const WMSZ_BOTTOMRIGHT: UINT = 8; -pub const HTERROR: LRESULT = (-2); -pub const HTTRANSPARENT: LRESULT = (-1); +pub const HTERROR: LRESULT = -2; +pub const HTTRANSPARENT: LRESULT = -1; pub const HTNOWHERE: LRESULT = 0; pub const HTCLIENT: LRESULT = 1; pub const HTCAPTION: LRESULT = 2; @@ -2509,7 +2509,8 @@ pub const HWND_BOTTOM: HWND = 1 as HWND; pub const HWND_TOPMOST: HWND = -1isize as HWND; pub const HWND_NOTOPMOST: HWND = -2isize as HWND; -STRUCT!{struct DLGTEMPLATE { +// FIXME packed(2) +STRUCT!{#[repr(packed)] struct DLGTEMPLATE { style: DWORD, dwExtendedStyle: DWORD, cdit: WORD, @@ -2522,7 +2523,8 @@ pub type LPDLGTEMPLATEW = *mut DLGTEMPLATE; pub type LPCDLGTEMPLATEA = *const DLGTEMPLATE; pub type LPCDLGTEMPLATEW = *const DLGTEMPLATE; -STRUCT!{struct DLGITEMTEMPLATE { +// FIXME packed(2) +STRUCT!{#[repr(packed)] struct DLGITEMTEMPLATE { style: DWORD, dwExtendedStyle: DWORD, x: c_short, @@ -3252,6 +3254,11 @@ pub const TOUCH_FEEDBACK_DEFAULT: DWORD = 0x1; pub const TOUCH_FEEDBACK_INDIRECT: DWORD = 0x2; pub const TOUCH_FEEDBACK_NONE: DWORD = 0x3; +ENUM!{enum POINTER_FEEDBACK_MODE { + POINTER_FEEDBACK_DEFAULT = 1, + POINTER_FEEDBACK_INDIRECT = 2, + POINTER_FEEDBACK_NONE = 3, +}} extern "system" { pub fn InitializeTouchInjection( maxCount: UINT32, @@ -3381,6 +3388,24 @@ 
hwnd: HWND, pointerType: POINTER_INPUT_TYPE, ) -> BOOL; +} +DECLARE_HANDLE!{HSYNTHETICPOINTERDEVICE, HSYNTHETICPOINTERDEVICE__} +extern "system" { + pub fn CreateSyntheticPointerDevice( + pointerType: POINTER_INPUT_TYPE, + maxCount: ULONG, + mode: POINTER_FEEDBACK_MODE, + ) -> HSYNTHETICPOINTERDEVICE; + pub fn InjectSyntheticPointerInput( + device: HSYNTHETICPOINTERDEVICE, + pointerInfo: *const POINTER_TYPE_INFO, + count: UINT32, + ) -> BOOL; + pub fn DestroySyntheticPointerDevice( + device: HSYNTHETICPOINTERDEVICE, + ); +} +extern "system" { pub fn EnableMouseInPointer( fEnable: BOOL, ) -> BOOL; @@ -6311,7 +6336,28 @@ iMinAnimate: c_int, }} pub type LPANIMATIONINFO = *mut ANIMATIONINFO; -//12672 +//12638 +STRUCT!{struct HIGHCONTRASTA { + cbSize: UINT, + dwFlags: DWORD, + lpszDefaultScheme: LPSTR, +}} +pub type LPHIGHCONTRASTA = *mut HIGHCONTRASTA; +STRUCT!{struct HIGHCONTRASTW { + cbSize: UINT, + dwFlags: DWORD, + lpszDefaultScheme: LPWSTR, +}} +pub type LPHIGHCONTRASTW = *mut HIGHCONTRASTW; +pub const HCF_HIGHCONTRASTON: DWORD = 0x00000001; +pub const HCF_AVAILABLE: DWORD = 0x00000002; +pub const HCF_HOTKEYACTIVE: DWORD = 0x00000004; +pub const HCF_CONFIRMHOTKEY: DWORD = 0x00000008; +pub const HCF_HOTKEYSOUND: DWORD = 0x00000010; +pub const HCF_INDICATOR: DWORD = 0x00000020; +pub const HCF_HOTKEYAVAILABLE: DWORD = 0x00000040; +pub const HCF_LOGONDESKTOP: DWORD = 0x00000100; +pub const HCF_DEFAULTDESKTOP: DWORD = 0x00000200; pub const CDS_UPDATEREGISTRY: DWORD = 0x00000001; pub const CDS_TEST: DWORD = 0x00000002; pub const CDS_FULLSCREEN: DWORD = 0x00000004; @@ -6894,7 +6940,7 @@ pub const GA_ROOTOWNER: UINT = 3; extern "system" { pub fn GetAncestor( - hWnd: HWND, + hwnd: HWND, gaFlags: UINT, ) -> HWND; pub fn RealChildWindowFromPoint( diff -Nru cargo-0.44.1/vendor/winapi/src/um/wlanapi.rs cargo-0.47.0/vendor/winapi/src/um/wlanapi.rs --- cargo-0.44.1/vendor/winapi/src/um/wlanapi.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wlanapi.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,1459 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Definitions and data structures for wlan auto config client side API. 
+use shared::devpropdef::DEVPROPKEY; +use shared::guiddef::{GUID, LPGUID}; +use shared::minwindef::{BOOL, BYTE, DWORD, LPBYTE, PBOOL, PBYTE, PDWORD, PUCHAR, ULONG, USHORT}; +use shared::windef::HWND; +use shared::windot11::{ + DOT11_COUNTRY_OR_REGION_STRING, DOT11_HESSID, DOT11_MAC_ADDRESS, DOT11_PHY_TYPE, + DOT11_RATE_SET_MAX_LENGTH, PDOT11_BSSID_LIST, PDOT11_HESSID, PDOT11_MAC_ADDRESS, +}; +use shared::wlantypes::{ + DOT11_ACCESSNETWORKOPTIONS, DOT11_AUTH_ALGORITHM, DOT11_AUTH_CIPHER_PAIR, DOT11_BSS_TYPE, + DOT11_CIPHER_ALGORITHM, DOT11_SSID, DOT11_VENUEINFO, PDOT11_ACCESSNETWORKOPTIONS, PDOT11_SSID, +}; +use um::eaptypes::EAP_METHOD_TYPE; +use um::l2cmn::{ + L2_NOTIFICATION_CODE_PUBLIC_BEGIN, L2_NOTIFICATION_CODE_V2_BEGIN, L2_NOTIFICATION_DATA, + L2_NOTIFICATION_SOURCE_ALL, L2_NOTIFICATION_SOURCE_NONE, L2_NOTIFICATION_SOURCE_ONEX, + L2_NOTIFICATION_SOURCE_WLAN_ACM, L2_NOTIFICATION_SOURCE_WLAN_HNWK, + L2_NOTIFICATION_SOURCE_WLAN_IHV, L2_NOTIFICATION_SOURCE_WLAN_MSM, + L2_NOTIFICATION_SOURCE_WLAN_SECURITY, L2_PROFILE_MAX_NAME_LENGTH, + L2_REASON_CODE_DOT11_AC_BASE, L2_REASON_CODE_DOT11_MSM_BASE, + L2_REASON_CODE_DOT11_SECURITY_BASE, L2_REASON_CODE_GROUP_SIZE, L2_REASON_CODE_PROFILE_BASE, + L2_REASON_CODE_SUCCESS, L2_REASON_CODE_UNKNOWN, +}; +use um::winnt::{ + BOOLEAN, DELETE, FILE_EXECUTE, FILE_READ_DATA, FILE_WRITE_DATA, HANDLE, LONG, LPCWSTR, LPWSTR, + PHANDLE, PVOID, PWCHAR, STANDARD_RIGHTS_EXECUTE, STANDARD_RIGHTS_READ, STANDARD_RIGHTS_WRITE, + ULONGLONG, WCHAR, WRITE_DAC, +}; +pub const WLAN_API_VERSION_1_0: DWORD = 0x00000001; +pub const WLAN_API_VERSION_2_0: DWORD = 0x00000002; +#[inline] +pub fn WLAN_API_VERSION_MAJOR(v: u32) -> u32 { + v & 0xffff +} +#[inline] +pub fn WLAN_API_VERSION_MINOR(v: u32) -> u32 { + v >> 16 +} +#[inline] +pub fn WLAN_API_MAKE_VERSION(major: u32, minor: u32) -> u32 { + minor << 16 | major +} +pub const WLAN_API_VERSION: u32 = WLAN_API_VERSION_2_0; +pub const WLAN_MAX_NAME_LENGTH: usize = L2_PROFILE_MAX_NAME_LENGTH; +pub const WLAN_PROFILE_GROUP_POLICY: DWORD = 0x00000001; +pub const WLAN_PROFILE_USER: DWORD = 0x00000002; +pub const WLAN_PROFILE_GET_PLAINTEXT_KEY: DWORD = 0x00000004; +pub const WLAN_PROFILE_CONNECTION_MODE_SET_BY_CLIENT: DWORD = 0x00010000; +pub const WLAN_PROFILE_CONNECTION_MODE_AUTO: DWORD = 0x00020000; +pub const WLAN_SET_EAPHOST_DATA_ALL_USERS: DWORD = 0x00000001; +STRUCT!{struct WLAN_PROFILE_INFO { + strProfileName: [WCHAR; WLAN_MAX_NAME_LENGTH], + dwFlags: DWORD, +}} +pub type PWLAN_PROFILE_INFO = *mut WLAN_PROFILE_INFO; +STRUCT!{struct DOT11_NETWORK { + dot11Ssid: DOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, +}} +pub type PDOT11_NETWORK = *mut DOT11_NETWORK; +pub const DOT11_PSD_IE_MAX_DATA_SIZE: DWORD = 240; +pub const DOT11_PSD_IE_MAX_ENTRY_NUMBER: DWORD = 5; +STRUCT!{struct WLAN_RAW_DATA { + dwDataSize: DWORD, + DataBlob: [BYTE; 1], +}} +pub type PWLAN_RAW_DATA = *mut WLAN_RAW_DATA; +STRUCT!{struct WLAN_RAW_DATA_LIST_DataList { + dwDataOffset: DWORD, + dwDataSize: DWORD, +}} +STRUCT!{struct WLAN_RAW_DATA_LIST { + dwTotalSize: DWORD, + dwNumberOfItems: DWORD, + DataList: [WLAN_RAW_DATA_LIST_DataList; 1], +}} +pub type PWLAN_RAW_DATA_LIST = *mut WLAN_RAW_DATA_LIST; +ENUM!{enum WLAN_CONNECTION_MODE { + wlan_connection_mode_profile = 0, + wlan_connection_mode_temporary_profile = 1, + wlan_connection_mode_discovery_secure = 2, + wlan_connection_mode_discovery_unsecure = 3, + wlan_connection_mode_auto = 4, + wlan_connection_mode_invalid = 5, +}} +pub type PWLAN_CONNECTION_MODE = *mut WLAN_CONNECTION_MODE; +pub type WLAN_REASON_CODE = 
DWORD; +pub type PWLAN_REASON_CODE = *mut DWORD; +pub const WLAN_REASON_CODE_SUCCESS: WLAN_REASON_CODE = L2_REASON_CODE_SUCCESS; +pub const WLAN_REASON_CODE_UNKNOWN: WLAN_REASON_CODE = L2_REASON_CODE_UNKNOWN; +pub const WLAN_REASON_CODE_RANGE_SIZE: WLAN_REASON_CODE = L2_REASON_CODE_GROUP_SIZE; +pub const WLAN_REASON_CODE_BASE: WLAN_REASON_CODE = L2_REASON_CODE_DOT11_AC_BASE; +pub const WLAN_REASON_CODE_AC_BASE: WLAN_REASON_CODE = L2_REASON_CODE_DOT11_AC_BASE; +pub const WLAN_REASON_CODE_AC_CONNECT_BASE: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_BASE + + WLAN_REASON_CODE_RANGE_SIZE / 2; +pub const WLAN_REASON_CODE_AC_END: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_BASE + + WLAN_REASON_CODE_RANGE_SIZE - 1; +pub const WLAN_REASON_CODE_PROFILE_BASE: WLAN_REASON_CODE = L2_REASON_CODE_PROFILE_BASE; +pub const WLAN_REASON_CODE_PROFILE_CONNECT_BASE: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + WLAN_REASON_CODE_RANGE_SIZE / 2; +pub const WLAN_REASON_CODE_PROFILE_END: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + WLAN_REASON_CODE_RANGE_SIZE - 1; +pub const WLAN_REASON_CODE_MSM_BASE: WLAN_REASON_CODE = L2_REASON_CODE_DOT11_MSM_BASE; +pub const WLAN_REASON_CODE_MSM_CONNECT_BASE: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_BASE + + WLAN_REASON_CODE_RANGE_SIZE / 2; +pub const WLAN_REASON_CODE_MSM_END: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_BASE + + WLAN_REASON_CODE_RANGE_SIZE - 1; +pub const WLAN_REASON_CODE_MSMSEC_BASE: WLAN_REASON_CODE = L2_REASON_CODE_DOT11_SECURITY_BASE; +pub const WLAN_REASON_CODE_MSMSEC_CONNECT_BASE: WLAN_REASON_CODE = WLAN_REASON_CODE_MSMSEC_BASE + + WLAN_REASON_CODE_RANGE_SIZE / 2; +pub const WLAN_REASON_CODE_MSMSEC_END: WLAN_REASON_CODE = WLAN_REASON_CODE_MSMSEC_BASE + + WLAN_REASON_CODE_RANGE_SIZE - 1; +pub const WLAN_REASON_CODE_NETWORK_NOT_COMPATIBLE: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_BASE + 1; +pub const WLAN_REASON_CODE_PROFILE_NOT_COMPATIBLE: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_BASE + 2; +pub const WLAN_REASON_CODE_NO_AUTO_CONNECTION: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + + 1; +pub const WLAN_REASON_CODE_NOT_VISIBLE: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + 2; +pub const WLAN_REASON_CODE_GP_DENIED: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + 3; +pub const WLAN_REASON_CODE_USER_DENIED: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + 4; +pub const WLAN_REASON_CODE_BSS_TYPE_NOT_ALLOWED: WLAN_REASON_CODE = + WLAN_REASON_CODE_AC_CONNECT_BASE + 5; +pub const WLAN_REASON_CODE_IN_FAILED_LIST: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + 6; +pub const WLAN_REASON_CODE_IN_BLOCKED_LIST: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + + 7; +pub const WLAN_REASON_CODE_SSID_LIST_TOO_LONG: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + + 8; +pub const WLAN_REASON_CODE_CONNECT_CALL_FAIL: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + + 9; +pub const WLAN_REASON_CODE_SCAN_CALL_FAIL: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + + 10; +pub const WLAN_REASON_CODE_NETWORK_NOT_AVAILABLE: WLAN_REASON_CODE = + WLAN_REASON_CODE_AC_CONNECT_BASE + 11; +pub const WLAN_REASON_CODE_PROFILE_CHANGED_OR_DELETED: WLAN_REASON_CODE = + WLAN_REASON_CODE_AC_CONNECT_BASE + 12; +pub const WLAN_REASON_CODE_KEY_MISMATCH: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + 13; +pub const WLAN_REASON_CODE_USER_NOT_RESPOND: WLAN_REASON_CODE = WLAN_REASON_CODE_AC_CONNECT_BASE + + 14; +pub const WLAN_REASON_CODE_AP_PROFILE_NOT_ALLOWED_FOR_CLIENT: WLAN_REASON_CODE = + WLAN_REASON_CODE_AC_CONNECT_BASE + 15; +pub const 
WLAN_REASON_CODE_AP_PROFILE_NOT_ALLOWED: WLAN_REASON_CODE = + WLAN_REASON_CODE_AC_CONNECT_BASE + 16; +pub const WLAN_REASON_CODE_HOTSPOT2_PROFILE_DENIED: WLAN_REASON_CODE = + WLAN_REASON_CODE_AC_CONNECT_BASE + 17; +pub const WLAN_REASON_CODE_INVALID_PROFILE_SCHEMA: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 1; +pub const WLAN_REASON_CODE_PROFILE_MISSING: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 2; +pub const WLAN_REASON_CODE_INVALID_PROFILE_NAME: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 3; +pub const WLAN_REASON_CODE_INVALID_PROFILE_TYPE: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 4; +pub const WLAN_REASON_CODE_INVALID_PHY_TYPE: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 5; +pub const WLAN_REASON_CODE_MSM_SECURITY_MISSING: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 6; +pub const WLAN_REASON_CODE_IHV_SECURITY_NOT_SUPPORTED: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 7; +pub const WLAN_REASON_CODE_IHV_OUI_MISMATCH: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 8; +pub const WLAN_REASON_CODE_IHV_OUI_MISSING: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 9; +pub const WLAN_REASON_CODE_IHV_SETTINGS_MISSING: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 10; +pub const WLAN_REASON_CODE_CONFLICT_SECURITY: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 11; +pub const WLAN_REASON_CODE_SECURITY_MISSING: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 12; +pub const WLAN_REASON_CODE_INVALID_BSS_TYPE: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 13; +pub const WLAN_REASON_CODE_INVALID_ADHOC_CONNECTION_MODE: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 14; +pub const WLAN_REASON_CODE_NON_BROADCAST_SET_FOR_ADHOC: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 15; +pub const WLAN_REASON_CODE_AUTO_SWITCH_SET_FOR_ADHOC: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 16; +pub const WLAN_REASON_CODE_AUTO_SWITCH_SET_FOR_MANUAL_CONNECTION: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 17; +pub const WLAN_REASON_CODE_IHV_SECURITY_ONEX_MISSING: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 18; +pub const WLAN_REASON_CODE_PROFILE_SSID_INVALID: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + + 19; +pub const WLAN_REASON_CODE_TOO_MANY_SSID: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 20; +pub const WLAN_REASON_CODE_IHV_CONNECTIVITY_NOT_SUPPORTED: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 21; +pub const WLAN_REASON_CODE_BAD_MAX_NUMBER_OF_CLIENTS_FOR_AP: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 22; +pub const WLAN_REASON_CODE_INVALID_CHANNEL: WLAN_REASON_CODE = WLAN_REASON_CODE_PROFILE_BASE + 23; +pub const WLAN_REASON_CODE_OPERATION_MODE_NOT_SUPPORTED: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 24; +pub const WLAN_REASON_CODE_AUTO_AP_PROFILE_NOT_ALLOWED: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 25; +pub const WLAN_REASON_CODE_AUTO_CONNECTION_NOT_ALLOWED: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 26; +pub const WLAN_REASON_CODE_HOTSPOT2_PROFILE_NOT_ALLOWED: WLAN_REASON_CODE = + WLAN_REASON_CODE_PROFILE_BASE + 27; +pub const WLAN_REASON_CODE_UNSUPPORTED_SECURITY_SET_BY_OS: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_BASE + 1; +pub const WLAN_REASON_CODE_UNSUPPORTED_SECURITY_SET: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_BASE + + 2; +pub const WLAN_REASON_CODE_BSS_TYPE_UNMATCH: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_BASE + 3; +pub const WLAN_REASON_CODE_PHY_TYPE_UNMATCH: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_BASE + 4; +pub 
const WLAN_REASON_CODE_DATARATE_UNMATCH: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_BASE + 5; +pub const WLAN_REASON_CODE_USER_CANCELLED: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 1; +pub const WLAN_REASON_CODE_ASSOCIATION_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 2; +pub const WLAN_REASON_CODE_ASSOCIATION_TIMEOUT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 3; +pub const WLAN_REASON_CODE_PRE_SECURITY_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 4; +pub const WLAN_REASON_CODE_START_SECURITY_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 5; +pub const WLAN_REASON_CODE_SECURITY_FAILURE: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 6; +pub const WLAN_REASON_CODE_SECURITY_TIMEOUT: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 7; +pub const WLAN_REASON_CODE_ROAMING_FAILURE: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 8; +pub const WLAN_REASON_CODE_ROAMING_SECURITY_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 9; +pub const WLAN_REASON_CODE_ADHOC_SECURITY_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 10; +pub const WLAN_REASON_CODE_DRIVER_DISCONNECTED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 11; +pub const WLAN_REASON_CODE_DRIVER_OPERATION_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 12; +pub const WLAN_REASON_CODE_IHV_NOT_AVAILABLE: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 13; +pub const WLAN_REASON_CODE_IHV_NOT_RESPONDING: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 14; +pub const WLAN_REASON_CODE_DISCONNECT_TIMEOUT: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 15; +pub const WLAN_REASON_CODE_INTERNAL_FAILURE: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 16; +pub const WLAN_REASON_CODE_UI_REQUEST_TIMEOUT: WLAN_REASON_CODE = WLAN_REASON_CODE_MSM_CONNECT_BASE + + 17; +pub const WLAN_REASON_CODE_TOO_MANY_SECURITY_ATTEMPTS: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 18; +pub const WLAN_REASON_CODE_AP_STARTING_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSM_CONNECT_BASE + 19; +pub const WLAN_REASON_CODE_MSMSEC_MIN: WLAN_REASON_CODE = WLAN_REASON_CODE_MSMSEC_BASE; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_KEY_INDEX: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 1; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_PSK_PRESENT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 2; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_KEY_LENGTH: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 3; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_PSK_LENGTH: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 4; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_NO_AUTH_CIPHER_SPECIFIED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 5; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_TOO_MANY_AUTH_CIPHER_SPECIFIED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 6; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_DUPLICATE_AUTH_CIPHER: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 7; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_RAWDATA_INVALID: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 8; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_AUTH_CIPHER: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 9; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_ONEX_DISABLED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 10; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_ONEX_ENABLED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 11; +pub 
const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_PMKCACHE_MODE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 12; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_PMKCACHE_SIZE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 13; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_PMKCACHE_TTL: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 14; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_PREAUTH_MODE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 15; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_PREAUTH_THROTTLE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 16; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_PREAUTH_ONLY_ENABLED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 17; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_NETWORK: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 18; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_NIC: WLAN_REASON_CODE = WLAN_REASON_CODE_MSMSEC_BASE + + 19; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_PROFILE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 20; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_DISCOVERY: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 21; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_PASSPHRASE_CHAR: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 22; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_KEYMATERIAL_CHAR: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 23; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_WRONG_KEYTYPE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 24; +pub const WLAN_REASON_CODE_MSMSEC_MIXED_CELL: WLAN_REASON_CODE = WLAN_REASON_CODE_MSMSEC_BASE + 25; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_AUTH_TIMERS_INVALID: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 26; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_INVALID_GKEY_INTV: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 27; +pub const WLAN_REASON_CODE_MSMSEC_TRANSITION_NETWORK: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 28; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_KEY_UNMAPPED_CHAR: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 29; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_PROFILE_AUTH: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 30; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_PROFILE_CIPHER: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 31; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_SAFE_MODE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 32; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_PROFILE_SAFE_MODE_NIC: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 33; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_PROFILE_SAFE_MODE_NW: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 34; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_UNSUPPORTED_AUTH: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 35; +pub const WLAN_REASON_CODE_MSMSEC_PROFILE_UNSUPPORTED_CIPHER: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 36; +pub const WLAN_REASON_CODE_MSMSEC_CAPABILITY_MFP_NW_NIC: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_BASE + 37; +pub const WLAN_REASON_CODE_MSMSEC_UI_REQUEST_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 1; +pub const WLAN_REASON_CODE_MSMSEC_AUTH_START_TIMEOUT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 2; +pub const WLAN_REASON_CODE_MSMSEC_AUTH_SUCCESS_TIMEOUT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 3; +pub const WLAN_REASON_CODE_MSMSEC_KEY_START_TIMEOUT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 4; +pub const 
WLAN_REASON_CODE_MSMSEC_KEY_SUCCESS_TIMEOUT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 5; +pub const WLAN_REASON_CODE_MSMSEC_M3_MISSING_KEY_DATA: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 6; +pub const WLAN_REASON_CODE_MSMSEC_M3_MISSING_IE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 7; +pub const WLAN_REASON_CODE_MSMSEC_M3_MISSING_GRP_KEY: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 8; +pub const WLAN_REASON_CODE_MSMSEC_PR_IE_MATCHING: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 9; +pub const WLAN_REASON_CODE_MSMSEC_SEC_IE_MATCHING: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 10; +pub const WLAN_REASON_CODE_MSMSEC_NO_PAIRWISE_KEY: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 11; +pub const WLAN_REASON_CODE_MSMSEC_G1_MISSING_KEY_DATA: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 12; +pub const WLAN_REASON_CODE_MSMSEC_G1_MISSING_GRP_KEY: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 13; +pub const WLAN_REASON_CODE_MSMSEC_PEER_INDICATED_INSECURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 14; +pub const WLAN_REASON_CODE_MSMSEC_NO_AUTHENTICATOR: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 15; +pub const WLAN_REASON_CODE_MSMSEC_NIC_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 16; +pub const WLAN_REASON_CODE_MSMSEC_CANCELLED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 17; +pub const WLAN_REASON_CODE_MSMSEC_KEY_FORMAT: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 18; +pub const WLAN_REASON_CODE_MSMSEC_DOWNGRADE_DETECTED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 19; +pub const WLAN_REASON_CODE_MSMSEC_PSK_MISMATCH_SUSPECTED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 20; +pub const WLAN_REASON_CODE_MSMSEC_FORCED_FAILURE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 21; +pub const WLAN_REASON_CODE_MSMSEC_M3_TOO_MANY_RSNIE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 22; +pub const WLAN_REASON_CODE_MSMSEC_M2_MISSING_KEY_DATA: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 23; +pub const WLAN_REASON_CODE_MSMSEC_M2_MISSING_IE: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 24; +pub const WLAN_REASON_CODE_MSMSEC_AUTH_WCN_COMPLETED: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 25; +pub const WLAN_REASON_CODE_MSMSEC_M3_MISSING_MGMT_GRP_KEY: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 26; +pub const WLAN_REASON_CODE_MSMSEC_G1_MISSING_MGMT_GRP_KEY: WLAN_REASON_CODE = + WLAN_REASON_CODE_MSMSEC_CONNECT_BASE + 27; +pub const WLAN_REASON_CODE_MSMSEC_MAX: WLAN_REASON_CODE = WLAN_REASON_CODE_MSMSEC_END; +pub type WLAN_SIGNAL_QUALITY = ULONG; +pub type PWLAN_SIGNAL_QUALITY = *mut ULONG; +pub const WLAN_AVAILABLE_NETWORK_CONNECTED: DWORD = 0x00000001; +pub const WLAN_AVAILABLE_NETWORK_HAS_PROFILE: DWORD = 0x00000002; +pub const WLAN_AVAILABLE_NETWORK_CONSOLE_USER_PROFILE: DWORD = 0x00000004; +pub const WLAN_AVAILABLE_NETWORK_INTERWORKING_SUPPORTED: DWORD = 0x00000008; +pub const WLAN_AVAILABLE_NETWORK_HOTSPOT2_ENABLED: DWORD = 0x00000010; +pub const WLAN_AVAILABLE_NETWORK_ANQP_SUPPORTED: DWORD = 0x00000020; +pub const WLAN_AVAILABLE_NETWORK_HOTSPOT2_DOMAIN: DWORD = 0x00000040; +pub const WLAN_AVAILABLE_NETWORK_HOTSPOT2_ROAMING: DWORD = 0x00000080; +pub const WLAN_AVAILABLE_NETWORK_AUTO_CONNECT_FAILED: DWORD = 0x00000100; +pub const 
WLAN_AVAILABLE_NETWORK_INCLUDE_ALL_ADHOC_PROFILES: DWORD = 0x00000001; +pub const WLAN_AVAILABLE_NETWORK_INCLUDE_ALL_MANUAL_HIDDEN_PROFILES: DWORD = 0x00000002; +STRUCT!{struct WLAN_RATE_SET { + uRateSetLength: ULONG, + usRateSet: [USHORT; DOT11_RATE_SET_MAX_LENGTH], +}} +pub type PWLAN_RATE_SET = *mut WLAN_RATE_SET; +pub const WLAN_MAX_PHY_TYPE_NUMBER: usize = 8; +STRUCT!{struct WLAN_AVAILABLE_NETWORK { + strProfileName: [WCHAR; WLAN_MAX_NAME_LENGTH], + dot11Ssid: DOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, + uNumberOfBssids: ULONG, + bNetworkConnectable: BOOL, + wlanNotConnectableReason: WLAN_REASON_CODE, + uNumberOfPhyTypes: ULONG, + dot11PhyTypes: [DOT11_PHY_TYPE; WLAN_MAX_PHY_TYPE_NUMBER], + bMorePhyTypes: BOOL, + wlanSignalQuality: WLAN_SIGNAL_QUALITY, + bSecurityEnabled: BOOL, + dot11DefaultAuthAlgorithm: DOT11_AUTH_ALGORITHM, + dot11DefaultCipherAlgorithm: DOT11_CIPHER_ALGORITHM, + dwFlags: DWORD, + dwReserved: DWORD, +}} +pub type PWLAN_AVAILABLE_NETWORK = *mut WLAN_AVAILABLE_NETWORK; +STRUCT!{struct WLAN_AVAILABLE_NETWORK_V2 { + strProfileName: [WCHAR; WLAN_MAX_NAME_LENGTH], + dot11Ssid: DOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, + uNumberOfBssids: ULONG, + bNetworkConnectable: BOOL, + wlanNotConnectableReason: WLAN_REASON_CODE, + uNumberOfPhyTypes: ULONG, + dot11PhyTypes: [DOT11_PHY_TYPE; WLAN_MAX_PHY_TYPE_NUMBER], + bMorePhyTypes: BOOL, + wlanSignalQuality: WLAN_SIGNAL_QUALITY, + bSecurityEnabled: BOOL, + dot11DefaultAuthAlgorithm: DOT11_AUTH_ALGORITHM, + dot11DefaultCipherAlgorithm: DOT11_CIPHER_ALGORITHM, + dwFlags: DWORD, + AccessNetworkOptions: DOT11_ACCESSNETWORKOPTIONS, + dot11HESSID: DOT11_HESSID, + VenueInfo: DOT11_VENUEINFO, + dwReserved: DWORD, +}} +pub type PWLAN_AVAILABLE_NETWORK_V2 = *mut WLAN_AVAILABLE_NETWORK_V2; +STRUCT!{struct WLAN_BSS_ENTRY { + dot11Ssid: DOT11_SSID, + uPhyId: ULONG, + dot11Bssid: DOT11_MAC_ADDRESS, + dot11BssType: DOT11_BSS_TYPE, + dot11BssPhyType: DOT11_PHY_TYPE, + lRssi: LONG, + uLinkQuality: ULONG, + bInRegDomain: BOOLEAN, + usBeaconPeriod: USHORT, + ullTimestamp: ULONGLONG, + ullHostTimestamp: ULONGLONG, + usCapabilityInformation: USHORT, + ulChCenterFrequency: ULONG, + wlanRateSet: WLAN_RATE_SET, + ulIeOffset: ULONG, + ulIeSize: ULONG, +}} +pub type PWLAN_BSS_ENTRY = *mut WLAN_BSS_ENTRY; +STRUCT!{struct WLAN_BSS_LIST { + dwTotalSize: DWORD, + dwNumberOfItems: DWORD, + wlanBssEntries: [WLAN_BSS_ENTRY; 1], +}} +pub type PWLAN_BSS_LIST = *mut WLAN_BSS_LIST; +ENUM!{enum WLAN_INTERFACE_STATE { + wlan_interface_state_not_ready = 0, + wlan_interface_state_connected = 1, + wlan_interface_state_ad_hoc_network_formed = 2, + wlan_interface_state_disconnecting = 3, + wlan_interface_state_disconnected = 4, + wlan_interface_state_associating = 5, + wlan_interface_state_discovering = 6, + wlan_interface_state_authenticating = 7, +}} +pub type PWLAN_INTERFACE_STATE = *mut WLAN_INTERFACE_STATE; +ENUM!{enum WLAN_ADHOC_NETWORK_STATE { + wlan_adhoc_network_state_formed = 0, + wlan_adhoc_network_state_connected = 1, +}} +pub type PWLAN_ADHOC_NETWORK_STATE = *mut WLAN_ADHOC_NETWORK_STATE; +STRUCT!{struct WLAN_INTERFACE_INFO { + InterfaceGuid: GUID, + strInterfaceDescription: [WCHAR; WLAN_MAX_NAME_LENGTH], + isState: WLAN_INTERFACE_STATE, +}} +pub type PWLAN_INTERFACE_INFO = *mut WLAN_INTERFACE_INFO; +STRUCT!{struct WLAN_ASSOCIATION_ATTRIBUTES { + dot11Ssid: DOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, + dot11Bssid: DOT11_MAC_ADDRESS, + dot11PhyType: DOT11_PHY_TYPE, + uDot11PhyIndex: ULONG, + wlanSignalQuality: WLAN_SIGNAL_QUALITY, + ulRxRate: ULONG, + 
ulTxRate: ULONG, +}} +pub type PWLAN_ASSOCIATION_ATTRIBUTES = *mut WLAN_ASSOCIATION_ATTRIBUTES; +STRUCT!{struct WLAN_SECURITY_ATTRIBUTES { + bSecurityEnabled: BOOL, + bOneXEnabled: BOOL, + dot11AuthAlgorithm: DOT11_AUTH_ALGORITHM, + dot11CipherAlgorithm: DOT11_CIPHER_ALGORITHM, +}} +pub type PWLAN_SECURITY_ATTRIBUTES = *mut WLAN_SECURITY_ATTRIBUTES; +STRUCT!{struct WLAN_CONNECTION_ATTRIBUTES { + isState: WLAN_INTERFACE_STATE, + wlanConnectionMode: WLAN_CONNECTION_MODE, + strProfileName: [WCHAR; WLAN_MAX_NAME_LENGTH], + wlanAssociationAttributes: WLAN_ASSOCIATION_ATTRIBUTES, + wlanSecurityAttributes: WLAN_SECURITY_ATTRIBUTES, +}} +pub type PWLAN_CONNECTION_ATTRIBUTES = *mut WLAN_CONNECTION_ATTRIBUTES; +ENUM!{enum DOT11_RADIO_STATE { + dot11_radio_state_unknown = 0, + dot11_radio_state_on = 1, + dot11_radio_state_off = 2, +}} +pub type PDOT11_RADIO_STATE = *mut DOT11_RADIO_STATE; +pub const WLAN_MAX_PHY_INDEX: usize = 64; +STRUCT!{struct WLAN_PHY_RADIO_STATE { + dwPhyIndex: DWORD, + dot11SoftwareRadioState: DOT11_RADIO_STATE, + dot11HardwareRadioState: DOT11_RADIO_STATE, +}} +pub type PWLAN_PHY_RADIO_STATE = *mut WLAN_PHY_RADIO_STATE; +STRUCT!{struct WLAN_RADIO_STATE { + dwNumberOfPhys: DWORD, + PhyRadioState: [WLAN_PHY_RADIO_STATE; WLAN_MAX_PHY_INDEX], +}} +pub type PWLAN_RADIO_STATE = *mut WLAN_RADIO_STATE; +ENUM!{enum WLAN_OPERATIONAL_STATE { + wlan_operational_state_unknown = 0, + wlan_operational_state_off = 1, + wlan_operational_state_on = 2, + wlan_operational_state_going_off = 3, + wlan_operational_state_going_on = 4, +}} +pub type PWLAN_OPERATIONAL_STATE = *mut WLAN_OPERATIONAL_STATE; +ENUM!{enum WLAN_INTERFACE_TYPE { + wlan_interface_type_emulated_802_11 = 0, + wlan_interface_type_native_802_11 = 1, + wlan_interface_type_invalid = 2, +}} +pub type PWLAN_INTERFACE_TYPE = *mut WLAN_INTERFACE_TYPE; +STRUCT!{struct WLAN_INTERFACE_CAPABILITY { + interfaceType: WLAN_INTERFACE_TYPE, + bDot11DSupported: BOOL, + dwMaxDesiredSsidListSize: DWORD, + dwMaxDesiredBssidListSize: DWORD, + dwNumberOfSupportedPhys: DWORD, + dot11PhyTypes: [DOT11_PHY_TYPE; WLAN_MAX_PHY_INDEX], +}} +pub type PWLAN_INTERFACE_CAPABILITY = *mut WLAN_INTERFACE_CAPABILITY; +STRUCT!{struct WLAN_AUTH_CIPHER_PAIR_LIST { + dwNumberOfItems: DWORD, + pAuthCipherPairList: [DOT11_AUTH_CIPHER_PAIR; 1], +}} +pub type PWLAN_AUTH_CIPHER_PAIR_LIST = *mut WLAN_AUTH_CIPHER_PAIR_LIST; +STRUCT!{struct WLAN_COUNTRY_OR_REGION_STRING_LIST { + dwNumberOfItems: DWORD, + pCountryOrRegionStringList: [DOT11_COUNTRY_OR_REGION_STRING; 1], +}} +pub type PWLAN_COUNTRY_OR_REGION_STRING_LIST = *mut WLAN_COUNTRY_OR_REGION_STRING_LIST; +STRUCT!{struct WLAN_PROFILE_INFO_LIST { + dwNumberOfItems: DWORD, + dwIndex: DWORD, + ProfileInfo: [WLAN_PROFILE_INFO; 1], +}} +pub type PWLAN_PROFILE_INFO_LIST = *mut WLAN_PROFILE_INFO_LIST; +STRUCT!{struct WLAN_AVAILABLE_NETWORK_LIST { + dwNumberOfItems: DWORD, + dwIndex: DWORD, + Network: [WLAN_AVAILABLE_NETWORK; 1], +}} +pub type PWLAN_AVAILABLE_NETWORK_LIST = *mut WLAN_AVAILABLE_NETWORK_LIST; +STRUCT!{struct WLAN_AVAILABLE_NETWORK_LIST_V2 { + dwNumberOfItems: DWORD, + dwIndex: DWORD, + Network: [WLAN_AVAILABLE_NETWORK_V2; 1], +}} +pub type PWLAN_AVAILABLE_NETWORK_LIST_V2 = *mut WLAN_AVAILABLE_NETWORK_LIST_V2; +STRUCT!{struct WLAN_INTERFACE_INFO_LIST { + dwNumberOfItems: DWORD, + dwIndex: DWORD, + InterfaceInfo: [WLAN_INTERFACE_INFO; 1], +}} +pub type PWLAN_INTERFACE_INFO_LIST = *mut WLAN_INTERFACE_INFO_LIST; +STRUCT!{struct DOT11_NETWORK_LIST { + dwNumberOfItems: DWORD, + dwIndex: DWORD, + Network: [DOT11_NETWORK; 1], 
+}} +pub type PDOT11_NETWORK_LIST = *mut DOT11_NETWORK_LIST; +ENUM!{enum WLAN_POWER_SETTING { + wlan_power_setting_no_saving = 0, + wlan_power_setting_low_saving = 1, + wlan_power_setting_medium_saving = 2, + wlan_power_setting_maximum_saving = 3, + wlan_power_setting_invalid = 4, +}} +pub type PWLAN_POWER_SETTING = *mut WLAN_POWER_SETTING; +pub const WLAN_CONNECTION_HIDDEN_NETWORK: DWORD = 0x00000001; +pub const WLAN_CONNECTION_ADHOC_JOIN_ONLY: DWORD = 0x00000002; +pub const WLAN_CONNECTION_IGNORE_PRIVACY_BIT: DWORD = 0x00000004; +pub const WLAN_CONNECTION_EAPOL_PASSTHROUGH: DWORD = 0x00000008; +pub const WLAN_CONNECTION_PERSIST_DISCOVERY_PROFILE: DWORD = 0x00000010; +pub const WLAN_CONNECTION_PERSIST_DISCOVERY_PROFILE_CONNECTION_MODE_AUTO: DWORD = 0x00000020; +pub const WLAN_CONNECTION_PERSIST_DISCOVERY_PROFILE_OVERWRITE_EXISTING: DWORD = 0x00000040; +STRUCT!{struct WLAN_CONNECTION_PARAMETERS { + wlanConnectionMode: WLAN_CONNECTION_MODE, + strProfile: LPCWSTR, + pDot11Ssid: PDOT11_SSID, + pDesiredBssidList: PDOT11_BSSID_LIST, + dot11BssType: DOT11_BSS_TYPE, + dwFlags: DWORD, +}} +pub type PWLAN_CONNECTION_PARAMETERS = *mut WLAN_CONNECTION_PARAMETERS; +STRUCT!{struct WLAN_CONNECTION_PARAMETERS_V2 { + wlanConnectionMode: WLAN_CONNECTION_MODE, + strProfile: LPCWSTR, + pDot11Ssid: PDOT11_SSID, + pDot11Hessid: PDOT11_HESSID, + pDesiredBssidList: PDOT11_BSSID_LIST, + dot11BssType: DOT11_BSS_TYPE, + dwFlags: DWORD, + pDot11AccessNetworkOptions: PDOT11_ACCESSNETWORKOPTIONS, +}} +pub type PWLAN_CONNECTION_PARAMETERS_V2 = *mut WLAN_CONNECTION_PARAMETERS_V2; +STRUCT!{struct WLAN_MSM_NOTIFICATION_DATA { + wlanConnectionMode: WLAN_CONNECTION_MODE, + strProfileName: [WCHAR; WLAN_MAX_NAME_LENGTH], + dot11Ssid: DOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, + dot11MacAddr: DOT11_MAC_ADDRESS, + bSecurityEnabled: BOOL, + bFirstPeer: BOOL, + bLastPeer: BOOL, + wlanReasonCode: WLAN_REASON_CODE, +}} +pub type PWLAN_MSM_NOTIFICATION_DATA = *mut WLAN_MSM_NOTIFICATION_DATA; +pub const WLAN_CONNECTION_NOTIFICATION_ADHOC_NETWORK_FORMED: DWORD = 0x00000001; +pub const WLAN_CONNECTION_NOTIFICATION_CONSOLE_USER_PROFILE: DWORD = 0x00000004; +STRUCT!{struct WLAN_CONNECTION_NOTIFICATION_DATA { + wlanConnectionMode: WLAN_CONNECTION_MODE, + strProfileName: [WCHAR; WLAN_MAX_NAME_LENGTH], + dot11Ssid: DOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, + bSecurityEnabled: BOOL, + wlanReasonCode: WLAN_REASON_CODE, + dwFlags: DWORD, + strProfileXml: [WCHAR; 1], +}} +pub type PWLAN_CONNECTION_NOTIFICATION_DATA = *mut WLAN_CONNECTION_NOTIFICATION_DATA; +pub const WLAN_NOTIFICATION_SOURCE_NONE: DWORD = L2_NOTIFICATION_SOURCE_NONE; +pub const WLAN_NOTIFICATION_SOURCE_ALL: DWORD = L2_NOTIFICATION_SOURCE_ALL; +pub const WLAN_NOTIFICATION_SOURCE_ACM: DWORD = L2_NOTIFICATION_SOURCE_WLAN_ACM; +pub const WLAN_NOTIFICATION_SOURCE_MSM: DWORD = L2_NOTIFICATION_SOURCE_WLAN_MSM; +pub const WLAN_NOTIFICATION_SOURCE_SECURITY: DWORD = L2_NOTIFICATION_SOURCE_WLAN_SECURITY; +pub const WLAN_NOTIFICATION_SOURCE_IHV: DWORD = L2_NOTIFICATION_SOURCE_WLAN_IHV; +pub const WLAN_NOTIFICATION_SOURCE_HNWK: DWORD = L2_NOTIFICATION_SOURCE_WLAN_HNWK; +pub const WLAN_NOTIFICATION_SOURCE_ONEX: DWORD = L2_NOTIFICATION_SOURCE_ONEX; +ENUM!{enum WLAN_NOTIFICATION_ACM { + wlan_notification_acm_start = L2_NOTIFICATION_CODE_PUBLIC_BEGIN, + wlan_notification_acm_autoconf_enabled = 1, + wlan_notification_acm_autoconf_disabled = 2, + wlan_notification_acm_background_scan_enabled = 3, + wlan_notification_acm_background_scan_disabled = 4, + wlan_notification_acm_bss_type_change = 5, 
+ wlan_notification_acm_power_setting_change = 6, + wlan_notification_acm_scan_complete = 7, + wlan_notification_acm_scan_fail = 8, + wlan_notification_acm_connection_start = 9, + wlan_notification_acm_connection_complete = 10, + wlan_notification_acm_connection_attempt_fail = 11, + wlan_notification_acm_filter_list_change = 12, + wlan_notification_acm_interface_arrival = 13, + wlan_notification_acm_interface_removal = 14, + wlan_notification_acm_profile_change = 15, + wlan_notification_acm_profile_name_change = 16, + wlan_notification_acm_profiles_exhausted = 17, + wlan_notification_acm_network_not_available = 18, + wlan_notification_acm_network_available = 19, + wlan_notification_acm_disconnecting = 20, + wlan_notification_acm_disconnected = 21, + wlan_notification_acm_adhoc_network_state_change = 22, + wlan_notification_acm_profile_unblocked = 23, + wlan_notification_acm_screen_power_change = 24, + wlan_notification_acm_profile_blocked = 25, + wlan_notification_acm_scan_list_refresh = 26, + wlan_notification_acm_operational_state_change = 27, + wlan_notification_acm_end = 28, +}} +pub type PWLAN_NOTIFICATION_ACM = *mut WLAN_NOTIFICATION_ACM; +ENUM!{enum WLAN_NOTIFICATION_MSM { + wlan_notification_msm_start = L2_NOTIFICATION_CODE_PUBLIC_BEGIN, + wlan_notification_msm_associating = 1, + wlan_notification_msm_associated = 2, + wlan_notification_msm_authenticating = 3, + wlan_notification_msm_connected = 4, + wlan_notification_msm_roaming_start = 5, + wlan_notification_msm_roaming_end = 6, + wlan_notification_msm_radio_state_change = 7, + wlan_notification_msm_signal_quality_change = 8, + wlan_notification_msm_disassociating = 9, + wlan_notification_msm_disconnected = 10, + wlan_notification_msm_peer_join = 11, + wlan_notification_msm_peer_leave = 12, + wlan_notification_msm_adapter_removal = 13, + wlan_notification_msm_adapter_operation_mode_change = 14, + wlan_notification_msm_link_degraded = 15, + wlan_notification_msm_link_improved = 16, + wlan_notification_msm_end = 17, +}} +pub type PWLAN_NOTIFICATION_MSM = *mut WLAN_NOTIFICATION_MSM; +ENUM!{enum WLAN_NOTIFICATION_SECURITY { + wlan_notification_security_start = L2_NOTIFICATION_CODE_PUBLIC_BEGIN, + wlan_notification_security_end = 1, +}} +pub type PWLAN_NOTIFICATION_SECURITY = *mut WLAN_NOTIFICATION_SECURITY; +pub type WLAN_NOTIFICATION_DATA = L2_NOTIFICATION_DATA; +pub type PWLAN_NOTIFICATION_DATA = *mut L2_NOTIFICATION_DATA; +FN!{stdcall WLAN_NOTIFICATION_CALLBACK( + PWLAN_NOTIFICATION_DATA, + PVOID, +) -> ()} +ENUM!{enum WLAN_OPCODE_VALUE_TYPE { + wlan_opcode_value_type_query_only = 0, + wlan_opcode_value_type_set_by_group_policy = 1, + wlan_opcode_value_type_set_by_user = 2, + wlan_opcode_value_type_invalid = 3, +}} +pub type PWLAN_OPCODE_VALUE_TYPE = *mut WLAN_OPCODE_VALUE_TYPE; +ENUM!{enum WLAN_INTF_OPCODE { + wlan_intf_opcode_autoconf_start = 0x000000000, + wlan_intf_opcode_autoconf_enabled = 1, + wlan_intf_opcode_background_scan_enabled = 2, + wlan_intf_opcode_media_streaming_mode = 3, + wlan_intf_opcode_radio_state = 4, + wlan_intf_opcode_bss_type = 5, + wlan_intf_opcode_interface_state = 6, + wlan_intf_opcode_current_connection = 7, + wlan_intf_opcode_channel_number = 8, + wlan_intf_opcode_supported_infrastructure_auth_cipher_pairs = 9, + wlan_intf_opcode_supported_adhoc_auth_cipher_pairs = 10, + wlan_intf_opcode_supported_country_or_region_string_list = 11, + wlan_intf_opcode_current_operation_mode = 12, + wlan_intf_opcode_supported_safe_mode = 13, + wlan_intf_opcode_certified_safe_mode = 14, + 
wlan_intf_opcode_hosted_network_capable = 15, + wlan_intf_opcode_management_frame_protection_capable = 16, + wlan_intf_opcode_autoconf_end = 0x0fffffff, + wlan_intf_opcode_msm_start = 0x10000100, + wlan_intf_opcode_statistics = 268435713, + wlan_intf_opcode_rssi = 268435714, + wlan_intf_opcode_msm_end = 0x1fffffff, + wlan_intf_opcode_security_start = 0x20010000, + wlan_intf_opcode_security_end = 0x2fffffff, + wlan_intf_opcode_ihv_start = 0x30000000, + wlan_intf_opcode_ihv_end = 0x3fffffff, +}} +pub type PWLAN_INTF_OPCODE = *mut WLAN_INTF_OPCODE; +ENUM!{enum WLAN_AUTOCONF_OPCODE { + wlan_autoconf_opcode_start = 0, + wlan_autoconf_opcode_show_denied_networks = 1, + wlan_autoconf_opcode_power_setting = 2, + wlan_autoconf_opcode_only_use_gp_profiles_for_allowed_networks = 3, + wlan_autoconf_opcode_allow_explicit_creds = 4, + wlan_autoconf_opcode_block_period = 5, + wlan_autoconf_opcode_allow_virtual_station_extensibility = 6, + wlan_autoconf_opcode_end = 7, +}} +pub type PWLAN_AUTOCONF_OPCODE = *mut WLAN_AUTOCONF_OPCODE; +ENUM!{enum WLAN_IHV_CONTROL_TYPE { + wlan_ihv_control_type_service = 0, + wlan_ihv_control_type_driver = 1, +}} +pub type PWLAN_IHV_CONTROL_TYPE = *mut WLAN_IHV_CONTROL_TYPE; +ENUM!{enum WLAN_FILTER_LIST_TYPE { + wlan_filter_list_type_gp_permit = 0, + wlan_filter_list_type_gp_deny = 1, + wlan_filter_list_type_user_permit = 2, + wlan_filter_list_type_user_deny = 3, +}} +pub type PWLAN_FILTER_LIST_TYPE = *mut WLAN_FILTER_LIST_TYPE; +STRUCT!{struct WLAN_PHY_FRAME_STATISTICS { + ullTransmittedFrameCount: ULONGLONG, + ullMulticastTransmittedFrameCount: ULONGLONG, + ullFailedCount: ULONGLONG, + ullRetryCount: ULONGLONG, + ullMultipleRetryCount: ULONGLONG, + ullMaxTXLifetimeExceededCount: ULONGLONG, + ullTransmittedFragmentCount: ULONGLONG, + ullRTSSuccessCount: ULONGLONG, + ullRTSFailureCount: ULONGLONG, + ullACKFailureCount: ULONGLONG, + ullReceivedFrameCount: ULONGLONG, + ullMulticastReceivedFrameCount: ULONGLONG, + ullPromiscuousReceivedFrameCount: ULONGLONG, + ullMaxRXLifetimeExceededCount: ULONGLONG, + ullFrameDuplicateCount: ULONGLONG, + ullReceivedFragmentCount: ULONGLONG, + ullPromiscuousReceivedFragmentCount: ULONGLONG, + ullFCSErrorCount: ULONGLONG, +}} +pub type PWLAN_PHY_FRAME_STATISTICS = *mut WLAN_PHY_FRAME_STATISTICS; +STRUCT!{struct WLAN_MAC_FRAME_STATISTICS { + ullTransmittedFrameCount: ULONGLONG, + ullReceivedFrameCount: ULONGLONG, + ullWEPExcludedCount: ULONGLONG, + ullTKIPLocalMICFailures: ULONGLONG, + ullTKIPReplays: ULONGLONG, + ullTKIPICVErrorCount: ULONGLONG, + ullCCMPReplays: ULONGLONG, + ullCCMPDecryptErrors: ULONGLONG, + ullWEPUndecryptableCount: ULONGLONG, + ullWEPICVErrorCount: ULONGLONG, + ullDecryptSuccessCount: ULONGLONG, + ullDecryptFailureCount: ULONGLONG, +}} +pub type PWLAN_MAC_FRAME_STATISTICS = *mut WLAN_MAC_FRAME_STATISTICS; +STRUCT!{struct WLAN_STATISTICS { + ullFourWayHandshakeFailures: ULONGLONG, + ullTKIPCounterMeasuresInvoked: ULONGLONG, + ullReserved: ULONGLONG, + MacUcastCounters: WLAN_MAC_FRAME_STATISTICS, + MacMcastCounters: WLAN_MAC_FRAME_STATISTICS, + dwNumberOfPhys: DWORD, + PhyCounters: [WLAN_PHY_FRAME_STATISTICS; 1], +}} +pub type PWLAN_STATISTICS = *mut WLAN_STATISTICS; +pub const WLAN_READ_ACCESS: DWORD = STANDARD_RIGHTS_READ | FILE_READ_DATA; +pub const WLAN_EXECUTE_ACCESS: DWORD = WLAN_READ_ACCESS | STANDARD_RIGHTS_EXECUTE | FILE_EXECUTE; +pub const WLAN_WRITE_ACCESS: DWORD = WLAN_READ_ACCESS | WLAN_EXECUTE_ACCESS | STANDARD_RIGHTS_WRITE + | FILE_WRITE_DATA | DELETE | WRITE_DAC; +ENUM!{enum WLAN_SECURABLE_OBJECT { + 
wlan_secure_permit_list = 0, + wlan_secure_deny_list = 1, + wlan_secure_ac_enabled = 2, + wlan_secure_bc_scan_enabled = 3, + wlan_secure_bss_type = 4, + wlan_secure_show_denied = 5, + wlan_secure_interface_properties = 6, + wlan_secure_ihv_control = 7, + wlan_secure_all_user_profiles_order = 8, + wlan_secure_add_new_all_user_profiles = 9, + wlan_secure_add_new_per_user_profiles = 10, + wlan_secure_media_streaming_mode_enabled = 11, + wlan_secure_current_operation_mode = 12, + wlan_secure_get_plaintext_key = 13, + wlan_secure_hosted_network_elevated_access = 14, + wlan_secure_virtual_station_extensibility = 15, + wlan_secure_wfd_elevated_access = 16, + WLAN_SECURABLE_OBJECT_COUNT = 17, +}} +pub type PWLAN_SECURABLE_OBJECT = *mut WLAN_SECURABLE_OBJECT; +STRUCT!{struct WLAN_DEVICE_SERVICE_GUID_LIST { + dwNumberOfItems: DWORD, + dwIndex: DWORD, + DeviceService: [GUID; 1], +}} +pub type PWLAN_DEVICE_SERVICE_GUID_LIST = *mut WLAN_DEVICE_SERVICE_GUID_LIST; +pub const WFD_API_VERSION_1_0: DWORD = 0x00000001; +pub const WFD_API_VERSION: DWORD = WFD_API_VERSION_1_0; +ENUM!{enum WFD_ROLE_TYPE { + WFD_ROLE_TYPE_NONE = 0x00, + WFD_ROLE_TYPE_DEVICE = 0x01, + WFD_ROLE_TYPE_GROUP_OWNER = 0x02, + WFD_ROLE_TYPE_CLIENT = 0x04, + WFD_ROLE_TYPE_MAX = 0x05, +}} +pub type PWFD_ROLE_TYPE = *mut WFD_ROLE_TYPE; +STRUCT!{struct WFD_GROUP_ID { + DeviceAddress: DOT11_MAC_ADDRESS, + GroupSSID: DOT11_SSID, +}} +pub type PWFD_GROUP_ID = *mut WFD_GROUP_ID; +extern "system" { + pub fn WlanOpenHandle( + dwClientVersion: DWORD, + pReserved: PVOID, + pdwNegotiatedVersion: PDWORD, + phClientHandle: PHANDLE, + ) -> DWORD; + pub fn WlanCloseHandle( + hClientHandle: HANDLE, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanEnumInterfaces( + hClientHandle: HANDLE, + pReserved: PVOID, + ppInterfaceList: *mut PWLAN_INTERFACE_INFO_LIST, + ) -> DWORD; + pub fn WlanSetAutoConfigParameter( + hClientHandle: HANDLE, + OpCode: WLAN_AUTOCONF_OPCODE, + dwDataSize: DWORD, + pData: PVOID, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanQueryAutoConfigParameter( + hClientHandle: HANDLE, + OpCode: WLAN_AUTOCONF_OPCODE, + pReserved: PVOID, + pdwDataSize: PDWORD, + ppData: *mut PVOID, + pWlanOpcodeValueType: PWLAN_OPCODE_VALUE_TYPE, + ) -> DWORD; + pub fn WlanGetInterfaceCapability( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pReserved: PVOID, + ppCapability: *mut PWLAN_INTERFACE_CAPABILITY, + ) -> DWORD; + pub fn WlanSetInterface( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + OpCode: WLAN_INTF_OPCODE, + dwDataSize: DWORD, + pData: PVOID, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanQueryInterface( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + OpCode: WLAN_INTF_OPCODE, + pReserved: PVOID, + pdwDataSize: PDWORD, + ppData: *mut PVOID, + pWlanOpcodeValueType: PWLAN_OPCODE_VALUE_TYPE, + ) -> DWORD; + pub fn WlanIhvControl( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + Type: WLAN_IHV_CONTROL_TYPE, + dwInBufferSize: DWORD, + pInBuffer: PVOID, + dwOutBufferSize: DWORD, + pOutBuffer: PVOID, + pdwBytesReturned: PDWORD, + ) -> DWORD; + pub fn WlanScan( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pDot11Ssid: PDOT11_SSID, + pIeData: PWLAN_RAW_DATA, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanGetAvailableNetworkList( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + dwFlags: DWORD, + pReserved: PVOID, + ppAvailableNetworkList: *mut PWLAN_AVAILABLE_NETWORK_LIST, + ) -> DWORD; + pub fn WlanGetAvailableNetworkList2( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + dwFlags: DWORD, + 
pReserved: PVOID, + ppAvailableNetworkList: *mut PWLAN_AVAILABLE_NETWORK_LIST_V2, + ) -> DWORD; + pub fn WlanGetNetworkBssList( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pDot11Ssid: PDOT11_SSID, + dot11BssType: DOT11_BSS_TYPE, + bSecurityEnabled: BOOL, + pReserved: PVOID, + ppWlanBssList: *mut PWLAN_BSS_LIST, + ) -> DWORD; + pub fn WlanConnect( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pConnectionParameters: PWLAN_CONNECTION_PARAMETERS, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanConnect2( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pConnectionParameters: PWLAN_CONNECTION_PARAMETERS_V2, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanDisconnect( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanRegisterNotification( + hClientHandle: HANDLE, + dwNotifSource: DWORD, + bIgnoreDuplicate: BOOL, + funcCallback: WLAN_NOTIFICATION_CALLBACK, + pCallbackContext: PVOID, + pReserved: PVOID, + pdwPrevNotifSource: PDWORD, + ) -> DWORD; + pub fn WlanGetProfile( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + pReserved: PVOID, + pstrProfileXml: *mut LPWSTR, + pdwFlags: *mut DWORD, + pdwGrantedAccess: *mut DWORD, + ) -> DWORD; + pub fn WlanSetProfileEapUserData( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + eapType: EAP_METHOD_TYPE, + dwFlags: DWORD, + dwEapUserDataSize: DWORD, + pbEapUserData: LPBYTE, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanSetProfileEapXmlUserData( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + dwFlags: DWORD, + strEapXmlUserData: LPCWSTR, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanSetProfile( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + dwFlags: DWORD, + strProfileXml: LPCWSTR, + strAllUserProfileSecurity: LPCWSTR, + bOverwrite: BOOL, + pReserved: PVOID, + pdwReasonCode: *mut DWORD, + ) -> DWORD; + pub fn WlanDeleteProfile( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanRenameProfile( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strOldProfileName: LPCWSTR, + strNewProfileName: LPCWSTR, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanGetProfileList( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pReserved: PVOID, + ppProfileList: *mut PWLAN_PROFILE_INFO_LIST, + ) -> DWORD; + pub fn WlanSetProfileList( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + dwItems: DWORD, + strProfileNames: *mut LPCWSTR, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanSetProfilePosition( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + dwPosition: DWORD, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanSetProfileCustomUserData( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + dwDataSize: DWORD, + pData: PBYTE, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanGetProfileCustomUserData( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + pReserved: PVOID, + pdwDataSize: *mut DWORD, + ppData: *mut PBYTE, + ) -> DWORD; + pub fn WlanSetFilterList( + hClientHandle: HANDLE, + wlanFilterListType: WLAN_FILTER_LIST_TYPE, + pNetworkList: PDOT11_NETWORK_LIST, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanGetFilterList( + hClientHandle: HANDLE, + wlanFilterListType: WLAN_FILTER_LIST_TYPE, + pReserved: PVOID, + ppNetworkList: *mut PDOT11_NETWORK_LIST, + ) -> DWORD; + pub fn 
WlanSetPsdIEDataList( + hClientHandle: HANDLE, + strFormat: LPCWSTR, + pPsdIEDataList: PWLAN_RAW_DATA_LIST, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanSaveTemporaryProfile( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + strProfileName: LPCWSTR, + strAllUserProfileSecurity: LPCWSTR, + dwFlags: DWORD, + bOverWrite: BOOL, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanDeviceServiceCommand( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + pDeviceServiceGuid: LPGUID, + dwOpCode: DWORD, + dwInBufferSize: DWORD, + pInBuffer: PVOID, + dwOutBufferSize: DWORD, + pOutBuffer: PVOID, + pdwBytesReturned: PDWORD, + ) -> DWORD; + pub fn WlanGetSupportedDeviceServices( + hClientHandle: HANDLE, + pInterfaceGuid: *const GUID, + ppDevSvcGuidList: *mut PWLAN_DEVICE_SERVICE_GUID_LIST, + ) -> DWORD; + pub fn WlanExtractPsdIEDataList( + hClientHandle: HANDLE, + dwIeDataSize: DWORD, + pRawIeData: PBYTE, + strFormat: LPCWSTR, + pReserved: PVOID, + ppPsdIEDataList: *mut PWLAN_RAW_DATA_LIST, + ) -> DWORD; + pub fn WlanReasonCodeToString( + dwReasonCode: DWORD, + dwBufferSize: DWORD, + pStringBuffer: PWCHAR, + pReserved: PVOID, + ) -> DWORD; + pub fn WlanAllocateMemory( + dwMemorySize: DWORD, + ) -> PVOID; + pub fn WlanFreeMemory( + pMemory: PVOID, + ); + pub fn WlanSetSecuritySettings( + hClientHandle: HANDLE, + SecurableObject: WLAN_SECURABLE_OBJECT, + strModifiedSDDL: LPCWSTR, + ) -> DWORD; + pub fn WlanGetSecuritySettings( + hClientHandle: HANDLE, + SecurableObject: WLAN_SECURABLE_OBJECT, + pValueType: PWLAN_OPCODE_VALUE_TYPE, + pstrCurrentSDDL: *mut LPWSTR, + pdwGrantedAccess: PDWORD, + ) -> DWORD; +} +pub const WLAN_UI_API_VERSION: DWORD = 1; +pub const WLAN_UI_API_INITIAL_VERSION: DWORD = 1; +ENUM!{enum WL_DISPLAY_PAGES { + WLConnectionPage = 0, + WLSecurityPage = 1, + WLAdvPage = 2, +}} +pub type PWL_DISPLAY_PAGES = *mut WL_DISPLAY_PAGES; +extern "system" { + pub fn WlanUIEditProfile( + dwClientVersion: DWORD, + wstrProfileName: LPCWSTR, + pInterfaceGuid: *mut GUID, + hWnd: HWND, + wlStartPage: WL_DISPLAY_PAGES, + pReserved: PVOID, + pWlanReasonCode: PWLAN_REASON_CODE, + ) -> DWORD; +} +ENUM!{enum WLAN_HOSTED_NETWORK_STATE { + wlan_hosted_network_unavailable = 0, + wlan_hosted_network_idle = 1, + wlan_hosted_network_active = 2, +}} +pub type PWLAN_HOSTED_NETWORK_STATE = *mut WLAN_HOSTED_NETWORK_STATE; +ENUM!{enum WLAN_HOSTED_NETWORK_REASON { + wlan_hosted_network_reason_success = 0, + wlan_hosted_network_reason_unspecified = 1, + wlan_hosted_network_reason_bad_parameters = 2, + wlan_hosted_network_reason_service_shutting_down = 3, + wlan_hosted_network_reason_insufficient_resources = 4, + wlan_hosted_network_reason_elevation_required = 5, + wlan_hosted_network_reason_read_only = 6, + wlan_hosted_network_reason_persistence_failed = 7, + wlan_hosted_network_reason_crypt_error = 8, + wlan_hosted_network_reason_impersonation = 9, + wlan_hosted_network_reason_stop_before_start = 10, + wlan_hosted_network_reason_interface_available = 11, + wlan_hosted_network_reason_interface_unavailable = 12, + wlan_hosted_network_reason_miniport_stopped = 13, + wlan_hosted_network_reason_miniport_started = 14, + wlan_hosted_network_reason_incompatible_connection_started = 15, + wlan_hosted_network_reason_incompatible_connection_stopped = 16, + wlan_hosted_network_reason_user_action = 17, + wlan_hosted_network_reason_client_abort = 18, + wlan_hosted_network_reason_ap_start_failed = 19, + wlan_hosted_network_reason_peer_arrived = 20, + wlan_hosted_network_reason_peer_departed = 21, + 
wlan_hosted_network_reason_peer_timeout = 22, + wlan_hosted_network_reason_gp_denied = 23, + wlan_hosted_network_reason_service_unavailable = 24, + wlan_hosted_network_reason_device_change = 25, + wlan_hosted_network_reason_properties_change = 26, + wlan_hosted_network_reason_virtual_station_blocking_use = 27, + wlan_hosted_network_reason_service_available_on_virtual_station = 28, +}} +pub type PWLAN_HOSTED_NETWORK_REASON = *mut WLAN_HOSTED_NETWORK_REASON; +ENUM!{enum WLAN_HOSTED_NETWORK_PEER_AUTH_STATE { + wlan_hosted_network_peer_state_invalid = 0, + wlan_hosted_network_peer_state_authenticated = 1, +}} +pub type PWLAN_HOSTED_NETWORK_PEER_AUTH_STATE = *mut WLAN_HOSTED_NETWORK_PEER_AUTH_STATE; +extern "system" { + pub fn WlanHostedNetworkStartUsing( + hClientHandle: HANDLE, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkStopUsing( + hClientHandle: HANDLE, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkForceStart( + hClientHandle: HANDLE, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkForceStop( + hClientHandle: HANDLE, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; +} +STRUCT!{struct WLAN_HOSTED_NETWORK_PEER_STATE { + PeerMacAddress: DOT11_MAC_ADDRESS, + PeerAuthState: WLAN_HOSTED_NETWORK_PEER_AUTH_STATE, +}} +pub type PWLAN_HOSTED_NETWORK_PEER_STATE = *mut WLAN_HOSTED_NETWORK_PEER_STATE; +STRUCT!{struct WLAN_HOSTED_NETWORK_RADIO_STATE { + dot11SoftwareRadioState: DOT11_RADIO_STATE, + dot11HardwareRadioState: DOT11_RADIO_STATE, +}} +pub type PWLAN_HOSTED_NETWORK_RADIO_STATE = *mut WLAN_HOSTED_NETWORK_RADIO_STATE; +ENUM!{enum WLAN_HOSTED_NETWORK_NOTIFICATION_CODE { + wlan_hosted_network_state_change = L2_NOTIFICATION_CODE_V2_BEGIN, + wlan_hosted_network_peer_state_change = 4097, + wlan_hosted_network_radio_state_change = 4098, +}} +pub type PWLAN_HOSTED_NETWORK_NOTIFICATION_CODE = *mut WLAN_HOSTED_NETWORK_NOTIFICATION_CODE; +STRUCT!{struct WLAN_HOSTED_NETWORK_STATE_CHANGE { + OldState: WLAN_HOSTED_NETWORK_STATE, + NewState: WLAN_HOSTED_NETWORK_STATE, + StateChangeReason: WLAN_HOSTED_NETWORK_REASON, +}} +pub type PWLAN_HOSTED_NETWORK_STATE_CHANGE = *mut WLAN_HOSTED_NETWORK_STATE_CHANGE; +STRUCT!{struct WLAN_HOSTED_NETWORK_DATA_PEER_STATE_CHANGE { + OldState: WLAN_HOSTED_NETWORK_PEER_STATE, + NewState: WLAN_HOSTED_NETWORK_PEER_STATE, + PeerStateChangeReason: WLAN_HOSTED_NETWORK_REASON, +}} +pub type PWLAN_HOSTED_NETWORK_DATA_PEER_STATE_CHANGE = + *mut WLAN_HOSTED_NETWORK_DATA_PEER_STATE_CHANGE; +ENUM!{enum WLAN_HOSTED_NETWORK_OPCODE { + wlan_hosted_network_opcode_connection_settings = 0, + wlan_hosted_network_opcode_security_settings = 1, + wlan_hosted_network_opcode_station_profile = 2, + wlan_hosted_network_opcode_enable = 3, +}} +pub type PWLAN_HOSTED_NETWORK_OPCODE = *mut WLAN_HOSTED_NETWORK_OPCODE; +STRUCT!{struct WLAN_HOSTED_NETWORK_CONNECTION_SETTINGS { + hostedNetworkSSID: DOT11_SSID, + dwMaxNumberOfPeers: DWORD, +}} +pub type PWLAN_HOSTED_NETWORK_CONNECTION_SETTINGS = *mut WLAN_HOSTED_NETWORK_CONNECTION_SETTINGS; +STRUCT!{struct WLAN_HOSTED_NETWORK_SECURITY_SETTINGS { + dot11AuthAlgo: DOT11_AUTH_ALGORITHM, + dot11CipherAlgo: DOT11_CIPHER_ALGORITHM, +}} +pub type PWLAN_HOSTED_NETWORK_SECURITY_SETTINGS = *mut WLAN_HOSTED_NETWORK_SECURITY_SETTINGS; +extern "system" { + pub fn WlanHostedNetworkQueryProperty( + hClientHandle: HANDLE, + OpCode: WLAN_HOSTED_NETWORK_OPCODE, + pdwDataSize: 
PDWORD, + ppvData: *mut PVOID, + pWlanOpcodeValueType: PWLAN_OPCODE_VALUE_TYPE, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkSetProperty( + hClientHandle: HANDLE, + OpCode: WLAN_HOSTED_NETWORK_OPCODE, + dwDataSize: DWORD, + pvData: PVOID, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkInitSettings( + hClientHandle: HANDLE, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkRefreshSecuritySettings( + hClientHandle: HANDLE, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; +} +STRUCT!{struct WLAN_HOSTED_NETWORK_STATUS { + HostedNetworkState: WLAN_HOSTED_NETWORK_STATE, + IPDeviceID: GUID, + wlanHostedNetworkBSSID: DOT11_MAC_ADDRESS, + dot11PhyType: DOT11_PHY_TYPE, + ulChannelFrequency: ULONG, + dwNumberOfPeers: DWORD, + PeerList: [WLAN_HOSTED_NETWORK_PEER_STATE; 1], +}} +pub type PWLAN_HOSTED_NETWORK_STATUS = *mut WLAN_HOSTED_NETWORK_STATUS; +extern "system" { + pub fn WlanHostedNetworkQueryStatus( + hClientHandle: HANDLE, + ppWlanHostedNetworkStatus: *mut PWLAN_HOSTED_NETWORK_STATUS, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkSetSecondaryKey( + hClientHandle: HANDLE, + dwKeyLength: DWORD, + pucKeyData: PUCHAR, + bIsPassPhrase: BOOL, + bPersistent: BOOL, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanHostedNetworkQuerySecondaryKey( + hClientHandle: HANDLE, + pdwKeyLength: PDWORD, + ppucKeyData: *mut PUCHAR, + pbIsPassPhrase: PBOOL, + pbPersistent: PBOOL, + pFailReason: PWLAN_HOSTED_NETWORK_REASON, + pvReserved: PVOID, + ) -> DWORD; + pub fn WlanRegisterVirtualStationNotification( + hClientHandle: HANDLE, + bRegister: BOOL, + pReserved: PVOID, + ) -> DWORD; +} +DEFINE_GUID!{GUID_DEVINTERFACE_WIFIDIRECT_DEVICE, + 0x439b20af, 0x8955, 0x405b, 0x99, 0xf0, 0xa6, 0x2a, 0xf0, 0xc6, 0x8d, 0x43} +DEFINE_GUID!{GUID_AEPSERVICE_WIFIDIRECT_DEVICE, + 0xcc29827c, 0x9caf, 0x4928, 0x99, 0xa9, 0x18, 0xf7, 0xc2, 0x38, 0x13, 0x89} +DEFINE_GUID!{GUID_DEVINTERFACE_ASP_INFRA_DEVICE, + 0xff823995, 0x7a72, 0x4c80, 0x87, 0x57, 0xc6, 0x7e, 0xe1, 0x3d, 0x1a, 0x49} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_DeviceAddress, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x01} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_InterfaceAddress, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x02} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_InterfaceGuid, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x03} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_GroupId, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x04} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_IsConnected, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x05} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_IsVisible, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x06} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_IsLegacyDevice, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x07} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_MiracastVersion, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x08} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_IsMiracastLCPSupported, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x09} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_Services, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 
0x6e, 0x0a} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_SupportedChannelList, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x0b} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_InformationElements, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x0c} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_DeviceAddressCopy, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x0d} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_IsRecentlyAssociated, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x0e} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_Service_Aeps, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x0f} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_NoMiracastAutoProject, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x10} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_Supported, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x11} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_StreamSecuritySupported, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x12} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_AccessPointBssid, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x13} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_SinkHostName, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x14} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_ChallengeAep, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x15} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_IsDMGCapable, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x16} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_DevnodeAep, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x17} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_FoundWsbService, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x18} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_HostName_ResolutionMode, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x19} +DEFINE_DEVPROPKEY!{DEVPKEY_InfraCast_SinkIpAddress, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x1a} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirect_TransientAssociation, + 0x1506935d, 0xe3e7, 0x450f, 0x86, 0x37, 0x82, 0x23, 0x3e, 0xbe, 0x5f, 0x6e, 0x1b} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirectServices_ServiceAddress, + 0x31b37743, 0x7c5e, 0x4005, 0x93, 0xe6, 0xe9, 0x53, 0xf9, 0x2b, 0x82, 0xe9, 0x02} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirectServices_ServiceName, + 0x31b37743, 0x7c5e, 0x4005, 0x93, 0xe6, 0xe9, 0x53, 0xf9, 0x2b, 0x82, 0xe9, 0x03} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirectServices_ServiceInformation, + 0x31b37743, 0x7c5e, 0x4005, 0x93, 0xe6, 0xe9, 0x53, 0xf9, 0x2b, 0x82, 0xe9, 0x04} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirectServices_AdvertisementId, + 0x31b37743, 0x7c5e, 0x4005, 0x93, 0xe6, 0xe9, 0x53, 0xf9, 0x2b, 0x82, 0xe9, 0x05} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirectServices_ServiceConfigMethods, + 0x31b37743, 0x7c5e, 0x4005, 0x93, 0xe6, 0xe9, 0x53, 0xf9, 0x2b, 0x82, 0xe9, 0x06} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFiDirectServices_RequestServiceInformation, + 0x31b37743, 0x7c5e, 0x4005, 0x93, 0xe6, 0xe9, 0x53, 0xf9, 0x2b, 0x82, 0xe9, 0x07} +extern "system" { + pub fn WFDOpenHandle( + dwClientVersion: DWORD, + pdwNegotiatedVersion: PDWORD, + phClientHandle: PHANDLE, + ) -> DWORD; + pub fn WFDCloseHandle( + hClientHandle: HANDLE, + ) -> DWORD; +} +FN!{stdcall 
WFD_OPEN_SESSION_COMPLETE_CALLBACK( + hSessionHandle: HANDLE, + pvContext: PVOID, + guidSessionInterface: GUID, + dwError: DWORD, + dwReasonCode: DWORD, +) -> ()} +extern "system" { + pub fn WFDStartOpenSession( + hClientHandle: HANDLE, + pDeviceAddress: PDOT11_MAC_ADDRESS, + pvContext: PVOID, + pfnCallback: WFD_OPEN_SESSION_COMPLETE_CALLBACK, + phSessionHandle: PHANDLE, + ) -> DWORD; + pub fn WFDCancelOpenSession( + hSessionHandle: HANDLE, + ) -> DWORD; + pub fn WFDOpenLegacySession( + hClientHandle: HANDLE, + pLegacyMacAddress: PDOT11_MAC_ADDRESS, + phSessionHandle: *mut HANDLE, + pGuidSessionInterface: *mut GUID, + ) -> DWORD; + pub fn WFDCloseSession( + hSessionHandle: HANDLE, + ) -> DWORD; + pub fn WFDUpdateDeviceVisibility( + pDeviceAddress: PDOT11_MAC_ADDRESS, + ) -> DWORD; +} +DEFINE_DEVPROPKEY!{DEVPKEY_WiFi_InterfaceGuid, + 0xef1167eb, 0xcbfc, 0x4341, 0xa5, 0x68, 0xa7, 0xc9, 0x1a, 0x68, 0x98, 0x2c, 0x02} diff -Nru cargo-0.44.1/vendor/winapi/src/um/wlanihv.rs cargo-0.47.0/vendor/winapi/src/um/wlanihv.rs --- cargo-0.44.1/vendor/winapi/src/um/wlanihv.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wlanihv.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,410 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Definition of public APIs for WLAN Extensibility Framework. +use shared::basetsd::UINT32; +use shared::guiddef::{CLSID, GUID}; +use shared::minwindef::{BOOL, BYTE, DWORD, LPVOID, PBOOL, PBYTE, PDWORD, UCHAR, ULONG, USHORT}; +use shared::windot11::{ + DOT11_ASSOC_STATUS, DOT11_DIRECTION, PDOT11_ASSOCIATION_COMPLETION_PARAMETERS, + PDOT11_CIPHER_DEFAULT_KEY_VALUE, PDOT11_CIPHER_KEY_MAPPING_KEY_VALUE, PDOT11_MAC_ADDRESS, + PDOT11_PRIVACY_EXEMPTION, +}; +use shared::wlantypes::{DOT11_AUTH_ALGORITHM, DOT11_BSS_TYPE, DOT11_CIPHER_ALGORITHM, DOT11_SSID}; +use um::dot1x::{ONEX_AUTH_STATUS, ONEX_REASON_CODE}; +use um::eaptypes::EAP_ATTRIBUTES; +use um::l2cmn::PL2_NOTIFICATION_DATA; +use um::winnt::{HANDLE, LPWSTR, PHANDLE, WCHAR}; +use um::winuser::PWTSSESSION_NOTIFICATION; +use um::wlanihvtypes::{MS_MAX_PROFILE_NAME_LENGTH, PDOT11EXT_IHV_PROFILE_PARAMS}; +use um::wlclient::{PDOT11_ADAPTER, PDOT11_BSS_LIST, PDOT11_PORT_STATE}; +STRUCT!{struct DOT11EXT_APIS { + Dot11ExtAllocateBuffer: DOT11EXT_ALLOCATE_BUFFER, + Dot11ExtFreeBuffer: DOT11EXT_FREE_BUFFER, + Dot11ExtSetProfileCustomUserData: DOT11EXT_SET_PROFILE_CUSTOM_USER_DATA, + Dot11ExtGetProfileCustomUserData: DOT11EXT_GET_PROFILE_CUSTOM_USER_DATA, + Dot11ExtSetCurrentProfile: DOT11EXT_SET_CURRENT_PROFILE, + Dot11ExtSendUIRequest: DOT11EXT_SEND_UI_REQUEST, + Dot11ExtPreAssociateCompletion: DOT11EXT_PRE_ASSOCIATE_COMPLETION, + Dot11ExtPostAssociateCompletion: DOT11EXT_POST_ASSOCIATE_COMPLETION, + Dot11ExtSendNotification: DOT11EXT_SEND_NOTIFICATION, + Dot11ExtSendPacket: DOT11EXT_SEND_PACKET, + Dot11ExtSetEtherTypeHandling: DOT11EXT_SET_ETHERTYPE_HANDLING, + Dot11ExtSetAuthAlgorithm: DOT11EXT_SET_AUTH_ALGORITHM, + Dot11ExtSetUnicastCipherAlgorithm: DOT11EXT_SET_UNICAST_CIPHER_ALGORITHM, + Dot11ExtSetMulticastCipherAlgorithm: DOT11EXT_SET_MULTICAST_CIPHER_ALGORITHM, + Dot11ExtSetDefaultKey: DOT11EXT_SET_DEFAULT_KEY, + Dot11ExtSetKeyMappingKey: DOT11EXT_SET_KEY_MAPPING_KEY, + Dot11ExtSetDefaultKeyId: DOT11EXT_SET_DEFAULT_KEY_ID, + Dot11ExtNicSpecificExtension: DOT11EXT_NIC_SPECIFIC_EXTENSION, + Dot11ExtSetExcludeUnencrypted: 
DOT11EXT_SET_EXCLUDE_UNENCRYPTED, + Dot11ExtStartOneX: DOT11EXT_ONEX_START, + Dot11ExtStopOneX: DOT11EXT_ONEX_STOP, + Dot11ExtProcessSecurityPacket: DOT11EXT_PROCESS_ONEX_PACKET, +}} +pub type PDOT11EXT_APIS = *mut DOT11EXT_APIS; +STRUCT!{struct DOT11EXT_IHV_HANDLERS { + Dot11ExtIhvDeinitService: DOT11EXTIHV_DEINIT_SERVICE, + Dot11ExtIhvInitAdapter: DOT11EXTIHV_INIT_ADAPTER, + Dot11ExtIhvDeinitAdapter: DOT11EXTIHV_DEINIT_ADAPTER, + Dot11ExtIhvPerformPreAssociate: DOT11EXTIHV_PERFORM_PRE_ASSOCIATE, + Dot11ExtIhvAdapterReset: DOT11EXTIHV_ADAPTER_RESET, + Dot11ExtIhvPerformPostAssociate: DOT11EXTIHV_PERFORM_POST_ASSOCIATE, + Dot11ExtIhvStopPostAssociate: DOT11EXTIHV_STOP_POST_ASSOCIATE, + Dot11ExtIhvValidateProfile: DOT11EXTIHV_VALIDATE_PROFILE, + Dot11ExtIhvPerformCapabilityMatch: DOT11EXTIHV_PERFORM_CAPABILITY_MATCH, + Dot11ExtIhvCreateDiscoveryProfiles: DOT11EXTIHV_CREATE_DISCOVERY_PROFILES, + Dot11ExtIhvProcessSessionChange: DOT11EXTIHV_PROCESS_SESSION_CHANGE, + Dot11ExtIhvReceiveIndication: DOT11EXTIHV_RECEIVE_INDICATION, + Dot11ExtIhvReceivePacket: DOT11EXTIHV_RECEIVE_PACKET, + Dot11ExtIhvSendPacketCompletion: DOT11EXTIHV_SEND_PACKET_COMPLETION, + Dot11ExtIhvIsUIRequestPending: DOT11EXTIHV_IS_UI_REQUEST_PENDING, + Dot11ExtIhvProcessUIResponse: DOT11EXTIHV_PROCESS_UI_RESPONSE, + Dot11ExtIhvQueryUIRequest: DOT11EXTIHV_QUERY_UI_REQUEST, + Dot11ExtIhvOnexIndicateResult: DOT11EXTIHV_ONEX_INDICATE_RESULT, + Dot11ExtIhvControl: DOT11EXTIHV_CONTROL, +}} +pub type PDOT11EXT_IHV_HANDLERS = *mut DOT11EXT_IHV_HANDLERS; +STRUCT!{struct DOT11EXT_VIRTUAL_STATION_APIS { + Dot11ExtRequestVirtualStation: DOT11EXT_REQUEST_VIRTUAL_STATION, + Dot11ExtReleaseVirtualStation: DOT11EXT_RELEASE_VIRTUAL_STATION, + Dot11ExtQueryVirtualStationProperties: DOT11EXT_QUERY_VIRTUAL_STATION_PROPERTIES, + Dot11ExtSetVirtualStationAPProperties: DOT11EXT_SET_VIRTUAL_STATION_AP_PROPERTIES, +}} +pub type PDOT11EXT_VIRTUAL_STATION_APIS = *mut DOT11EXT_VIRTUAL_STATION_APIS; +STRUCT!{struct DOT11_IHV_VERSION_INFO { + dwVerMin: DWORD, + dwVerMax: DWORD, +}} +pub type PDOT11_IHV_VERSION_INFO = *mut DOT11_IHV_VERSION_INFO; +ENUM!{enum DOT11EXT_IHV_CONNECTION_PHASE { + connection_phase_any = 0, + connection_phase_initial_connection = 1, + connection_phase_post_l3_connection = 2, +}} +pub type PDOT11EXT_IHV_CONNECTION_PHASE = *mut DOT11EXT_IHV_CONNECTION_PHASE; +STRUCT!{struct DOT11EXT_IHV_UI_REQUEST { + dwSessionId: DWORD, + guidUIRequest: GUID, + UIPageClsid: CLSID, + dwByteCount: DWORD, + pvUIRequest: *mut BYTE, +}} +pub type PDOT11EXT_IHV_UI_REQUEST = *mut DOT11EXT_IHV_UI_REQUEST; +ENUM!{enum DOT11_MSONEX_RESULT { + DOT11_MSONEX_SUCCESS = 0, + DOT11_MSONEX_FAILURE = 1, + DOT11_MSONEX_IN_PROGRESS = 2, +}} +pub type PDOT11_MSONEX_RESULT = *mut DOT11_MSONEX_RESULT; +STRUCT!{struct DOT11_EAP_RESULT { + dwFailureReasonCode: UINT32, + pAttribArray: *mut EAP_ATTRIBUTES, +}} +pub type PDOT11_EAP_RESULT = *mut DOT11_EAP_RESULT; +STRUCT!{struct DOT11_MSONEX_RESULT_PARAMS { + Dot11OnexAuthStatus: ONEX_AUTH_STATUS, + Dot11OneXReasonCode: ONEX_REASON_CODE, + pbMPPESendKey: PBYTE, + dwMPPESendKeyLen: DWORD, + pbMPPERecvKey: PBYTE, + dwMPPERecvKeyLen: DWORD, + pDot11EapResult: PDOT11_EAP_RESULT, +}} +pub type PDOT11_MSONEX_RESULT_PARAMS = *mut DOT11_MSONEX_RESULT_PARAMS; +STRUCT!{struct DOT11EXT_IHV_CONNECTIVITY_PROFILE { + pszXmlFragmentIhvConnectivity: LPWSTR, +}} +pub type PDOT11EXT_IHV_CONNECTIVITY_PROFILE = *mut DOT11EXT_IHV_CONNECTIVITY_PROFILE; +STRUCT!{struct DOT11EXT_IHV_SECURITY_PROFILE { + pszXmlFragmentIhvSecurity: LPWSTR, + 
bUseMSOnex: BOOL, +}} +pub type PDOT11EXT_IHV_SECURITY_PROFILE = *mut DOT11EXT_IHV_SECURITY_PROFILE; +STRUCT!{struct DOT11EXT_IHV_DISCOVERY_PROFILE { + IhvConnectivityProfile: DOT11EXT_IHV_CONNECTIVITY_PROFILE, + IhvSecurityProfile: DOT11EXT_IHV_SECURITY_PROFILE, +}} +pub type PDOT11EXT_IHV_DISCOVERY_PROFILE = *mut DOT11EXT_IHV_DISCOVERY_PROFILE; +STRUCT!{struct DOT11EXT_IHV_DISCOVERY_PROFILE_LIST { + dwCount: DWORD, + pIhvDiscoveryProfiles: PDOT11EXT_IHV_DISCOVERY_PROFILE, +}} +pub type PDOT11EXT_IHV_DISCOVERY_PROFILE_LIST = *mut DOT11EXT_IHV_DISCOVERY_PROFILE_LIST; +ENUM!{enum DOT11EXT_IHV_INDICATION_TYPE { + IndicationTypeNicSpecificNotification = 0, + IndicationTypePmkidCandidateList = 1, + IndicationTypeTkipMicFailure = 2, + IndicationTypePhyStateChange = 3, + IndicationTypeLinkQuality = 4, +}} +pub type PDOT11EXT_IHV_INDICATION_TYPE = *mut DOT11EXT_IHV_INDICATION_TYPE; +pub const DOT11EXT_PSK_MAX_LENGTH: usize = 64; +STRUCT!{struct DOT11EXT_VIRTUAL_STATION_AP_PROPERTY { + dot11SSID: DOT11_SSID, + dot11AuthAlgo: DOT11_AUTH_ALGORITHM, + dot11CipherAlgo: DOT11_CIPHER_ALGORITHM, + bIsPassPhrase: BOOL, + dwKeyLength: DWORD, + ucKeyData: [UCHAR; DOT11EXT_PSK_MAX_LENGTH], +}} +pub type PDOT11EXT_VIRTUAL_STATION_AP_PROPERTY = *mut DOT11EXT_VIRTUAL_STATION_AP_PROPERTY; +pub const WDIAG_IHV_WLAN_ID_FLAG_SECURITY_ENABLED: DWORD = 0x00000001; +STRUCT!{struct WDIAG_IHV_WLAN_ID { + strProfileName: [WCHAR; MS_MAX_PROFILE_NAME_LENGTH], + Ssid: DOT11_SSID, + BssType: DOT11_BSS_TYPE, + dwFlags: DWORD, + dwReasonCode: DWORD, +}} +pub type PWDIAG_IHV_WLAN_ID = *mut WDIAG_IHV_WLAN_ID; +FN!{stdcall DOT11EXT_ALLOCATE_BUFFER( + dwByteCount: DWORD, + ppvBuffer: *mut LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_FREE_BUFFER( + pvMemory: LPVOID, +) -> ()} +FN!{stdcall DOT11EXT_SET_PROFILE_CUSTOM_USER_DATA( + hDot11SvcHandle: HANDLE, + hConnectSession: HANDLE, + dwSessionID: DWORD, + dwDataSize: DWORD, + pvData: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_GET_PROFILE_CUSTOM_USER_DATA( + hDot11SvcHandle: HANDLE, + hConnectSession: HANDLE, + dwSessionID: DWORD, + pdwDataSize: *mut DWORD, + ppvData: *mut LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_CURRENT_PROFILE( + hDot11SvcHandle: HANDLE, + hConnectSession: HANDLE, + pIhvConnProfile: PDOT11EXT_IHV_CONNECTIVITY_PROFILE, + pIhvSecProfile: PDOT11EXT_IHV_SECURITY_PROFILE, +) -> DWORD} +FN!{stdcall DOT11EXT_SEND_UI_REQUEST( + hDot11SvcHandle: HANDLE, + pIhvUIRequest: PDOT11EXT_IHV_UI_REQUEST, +) -> DWORD} +FN!{stdcall DOT11EXT_PRE_ASSOCIATE_COMPLETION( + hDot11SvcHandle: HANDLE, + hConnectSession: HANDLE, + dwReasonCode: DWORD, + dwWin32Error: DWORD, +) -> DWORD} +FN!{stdcall DOT11EXT_POST_ASSOCIATE_COMPLETION( + hDot11SvcHandle: HANDLE, + hSecuritySessionID: HANDLE, + pPeer: PDOT11_MAC_ADDRESS, + dwReasonCode: DWORD, + dwWin32Error: DWORD, +) -> DWORD} +FN!{stdcall DOT11EXT_SEND_NOTIFICATION( + hDot11SvcHandle: HANDLE, + pNotificationData: PL2_NOTIFICATION_DATA, +) -> DWORD} +FN!{stdcall DOT11EXT_SEND_PACKET( + hDot11SvcHandle: HANDLE, + uPacketLen: ULONG, + pvPacket: LPVOID, + hSendCompletion: HANDLE, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_ETHERTYPE_HANDLING( + hDot11SvcHandle: HANDLE, + uMaxBackLog: ULONG, + uNumOfExemption: ULONG, + pExemption: PDOT11_PRIVACY_EXEMPTION, + uNumOfRegistration: ULONG, + pusRegistration: *mut USHORT, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_AUTH_ALGORITHM( + hDot11SvcHandle: HANDLE, + dwAuthAlgo: DWORD, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_UNICAST_CIPHER_ALGORITHM( + hDot11SvcHandle: HANDLE, + dwUnicastCipherAlgo: DWORD, +) -> DWORD} 
+FN!{stdcall DOT11EXT_SET_MULTICAST_CIPHER_ALGORITHM( + hDot11SvcHandle: HANDLE, + dwMulticastCipherAlgo: DWORD, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_DEFAULT_KEY( + hDot11SvcHandle: HANDLE, + pKey: PDOT11_CIPHER_DEFAULT_KEY_VALUE, + dot11Direction: DOT11_DIRECTION, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_KEY_MAPPING_KEY( + hDot11SvcHandle: HANDLE, + pKey: PDOT11_CIPHER_KEY_MAPPING_KEY_VALUE, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_DEFAULT_KEY_ID( + hDot11SvcHandle: HANDLE, + uDefaultKeyId: ULONG, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_EXCLUDE_UNENCRYPTED( + hDot11SvcHandle: HANDLE, + bExcludeUnencrypted: BOOL, +) -> DWORD} +FN!{stdcall DOT11EXT_NIC_SPECIFIC_EXTENSION( + hDot11SvcHandle: HANDLE, + dwInBufferSize: DWORD, + pvInBuffer: LPVOID, + pdwOutBufferSize: *mut DWORD, + pvOutBuffer: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_ONEX_START( + hDot11SvcHandle: HANDLE, + pEapAttributes: *mut EAP_ATTRIBUTES, +) -> DWORD} +FN!{stdcall DOT11EXT_ONEX_STOP( + hDot11SvcHandle: HANDLE, +) -> DWORD} +FN!{stdcall DOT11EXT_PROCESS_ONEX_PACKET( + hDot11SvcHandle: HANDLE, + dwInPacketSize: DWORD, + pvInPacket: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_REQUEST_VIRTUAL_STATION( + hDot11PrimaryHandle: HANDLE, + pvReserved: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_RELEASE_VIRTUAL_STATION( + hDot11PrimaryHandle: HANDLE, + pvReserved: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_QUERY_VIRTUAL_STATION_PROPERTIES( + hDot11SvcHandle: HANDLE, + pbIsVirtualStation: *mut BOOL, + pgPrimary: *mut GUID, + pvReserved: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXT_SET_VIRTUAL_STATION_AP_PROPERTIES( + hDot11SvcHandle: HANDLE, + hConnectSession: HANDLE, + dwNumProperties: DWORD, + pProperties: PDOT11EXT_VIRTUAL_STATION_AP_PROPERTY, + pvReserved: LPVOID, +) -> DWORD} +pub const IHV_VERSION_FUNCTION_NAME: &'static str = "Dot11ExtIhvGetVersionInfo"; +pub const IHV_INIT_FUNCTION_NAME: &'static str = "Dot11ExtIhvInitService"; +pub const IHV_INIT_VS_FUNCTION_NAME: &'static str = "Dot11ExtIhvInitVirtualStation"; +FN!{stdcall DOT11EXTIHV_GET_VERSION_INFO( + pDot11IHVVersionInfo: PDOT11_IHV_VERSION_INFO, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_INIT_SERVICE( + dwVerNumUsed: DWORD, + pDot11ExtAPI: PDOT11EXT_APIS, + pvReserved: LPVOID, + pDot11IHVHandlers: PDOT11EXT_IHV_HANDLERS, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_INIT_VIRTUAL_STATION( + pDot11ExtVSAPI: PDOT11EXT_VIRTUAL_STATION_APIS, + pvReserved: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_DEINIT_SERVICE() -> ()} +FN!{stdcall DOT11EXTIHV_INIT_ADAPTER( + pDot11Adapter: PDOT11_ADAPTER, + hDot11SvcHandle: HANDLE, + phIhvExtAdapter: PHANDLE, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_DEINIT_ADAPTER( + hIhvExtAdapter: HANDLE, +) -> ()} +FN!{stdcall DOT11EXTIHV_PERFORM_PRE_ASSOCIATE( + hIhvExtAdapter: HANDLE, + hConnectSession: HANDLE, + pIhvProfileParams: PDOT11EXT_IHV_PROFILE_PARAMS, + pIhvConnProfile: PDOT11EXT_IHV_CONNECTIVITY_PROFILE, + pIhvSecProfile: PDOT11EXT_IHV_SECURITY_PROFILE, + pConnectableBssid: PDOT11_BSS_LIST, + pdwReasonCode: PDWORD, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_ADAPTER_RESET( + hIhvExtAdapter: HANDLE, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_PERFORM_POST_ASSOCIATE( + hIhvExtAdapter: HANDLE, + hSecuritySessionID: HANDLE, + pPortState: PDOT11_PORT_STATE, + uDot11AssocParamsBytes: ULONG, + pDot11AssocParams: PDOT11_ASSOCIATION_COMPLETION_PARAMETERS, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_STOP_POST_ASSOCIATE( + hIhvExtAdapter: HANDLE, + pPeer: PDOT11_MAC_ADDRESS, + dot11AssocStatus: DOT11_ASSOC_STATUS, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_VALIDATE_PROFILE( + 
hIhvExtAdapter: HANDLE, + pIhvProfileParams: PDOT11EXT_IHV_PROFILE_PARAMS, + pIhvConnProfile: PDOT11EXT_IHV_CONNECTIVITY_PROFILE, + pIhvSecProfile: PDOT11EXT_IHV_SECURITY_PROFILE, + pdwReasonCode: PDWORD, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_PERFORM_CAPABILITY_MATCH( + hIhvExtAdapter: HANDLE, + pIhvProfileParams: PDOT11EXT_IHV_PROFILE_PARAMS, + pIhvConnProfile: PDOT11EXT_IHV_CONNECTIVITY_PROFILE, + pIhvSecProfile: PDOT11EXT_IHV_SECURITY_PROFILE, + pConnectableBssid: PDOT11_BSS_LIST, + pdwReasonCode: PDWORD, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_CREATE_DISCOVERY_PROFILES( + hIhvExtAdapter: HANDLE, + bInsecure: BOOL, + pIhvProfileParams: PDOT11EXT_IHV_PROFILE_PARAMS, + pConnectableBssid: PDOT11_BSS_LIST, + pIhvDiscoveryProfileList: PDOT11EXT_IHV_DISCOVERY_PROFILE_LIST, + pdwReasonCode: PDWORD, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_PROCESS_SESSION_CHANGE( + uEventType: ULONG, + pSessionNotification: PWTSSESSION_NOTIFICATION, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_RECEIVE_INDICATION( + hIhvExtAdapter: HANDLE, + indicationType: DOT11EXT_IHV_INDICATION_TYPE, + uBufferLength: ULONG, + pvBuffer: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_RECEIVE_PACKET( + hIhvExtAdapter: HANDLE, + dwInBufferSize: DWORD, + pvInBuffer: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_SEND_PACKET_COMPLETION( + hSendCompletion: HANDLE, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_IS_UI_REQUEST_PENDING( + guidUIRequest: GUID, + pbIsRequestPending: PBOOL, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_PROCESS_UI_RESPONSE( + guidUIRequest: GUID, + dwByteCount: DWORD, + pvResponseBuffer: LPVOID, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_QUERY_UI_REQUEST( + hIhvExtAdapter: HANDLE, + connectionPhase: DOT11EXT_IHV_CONNECTION_PHASE, + ppIhvUIRequest: *mut PDOT11EXT_IHV_UI_REQUEST, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_ONEX_INDICATE_RESULT( + hIhvExtAdapter: HANDLE, + msOneXResult: DOT11_MSONEX_RESULT, + pDot11MsOneXResultParams: PDOT11_MSONEX_RESULT_PARAMS, +) -> DWORD} +FN!{stdcall DOT11EXTIHV_CONTROL( + hIhvExtAdapter: HANDLE, + dwInBufferSize: DWORD, + pInBuffer: PBYTE, + dwOutBufferSize: DWORD, + pOutBuffer: PBYTE, + pdwBytesReturned: PDWORD, +) -> DWORD} diff -Nru cargo-0.44.1/vendor/winapi/src/um/wlanihvtypes.rs cargo-0.47.0/vendor/winapi/src/um/wlanihvtypes.rs --- cargo-0.44.1/vendor/winapi/src/um/wlanihvtypes.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wlanihvtypes.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,41 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +//! Structures used to hold information for IHV. 
+use shared::guiddef::GUID; +use shared::minwindef::{BOOL, BYTE, DWORD, ULONG}; +use shared::wlantypes::{DOT11_AUTH_ALGORITHM, DOT11_BSS_TYPE, DOT11_CIPHER_ALGORITHM, DOT11_SSID}; +use um::eaptypes::EAP_METHOD_TYPE; +use um::winnt::WCHAR; +STRUCT!{struct DOT11_MSSECURITY_SETTINGS { + dot11AuthAlgorithm: DOT11_AUTH_ALGORITHM, + dot11CipherAlgorithm: DOT11_CIPHER_ALGORITHM, + fOneXEnabled: BOOL, + eapMethodType: EAP_METHOD_TYPE, + dwEapConnectionDataLen: DWORD, + pEapConnectionData: *mut BYTE, +}} +pub type PDOT11_MSSECURITY_SETTINGS = *mut DOT11_MSSECURITY_SETTINGS; +STRUCT!{struct DOT11EXT_IHV_SSID_LIST { + ulCount: ULONG, + SSIDs: [DOT11_SSID; 1], +}} +pub type PDOT11EXT_IHV_SSID_LIST = *mut DOT11EXT_IHV_SSID_LIST; +STRUCT!{struct DOT11EXT_IHV_PROFILE_PARAMS { + pSsidList: PDOT11EXT_IHV_SSID_LIST, + BssType: DOT11_BSS_TYPE, + pMSSecuritySettings: PDOT11_MSSECURITY_SETTINGS, +}} +pub type PDOT11EXT_IHV_PROFILE_PARAMS = *mut DOT11EXT_IHV_PROFILE_PARAMS; +pub const MS_MAX_PROFILE_NAME_LENGTH: usize = 256; +pub const MS_PROFILE_GROUP_POLICY: DWORD = 0x00000001; +pub const MS_PROFILE_USER: DWORD = 0x00000002; +STRUCT!{struct DOT11EXT_IHV_PARAMS { + dot11ExtIhvProfileParams: DOT11EXT_IHV_PROFILE_PARAMS, + wstrProfileName: [WCHAR; MS_MAX_PROFILE_NAME_LENGTH], + dwProfileTypeFlags: DWORD, + interfaceGuid: GUID, +}} +pub type PDOT11EXT_IHV_PARAMS = *mut DOT11EXT_IHV_PARAMS; diff -Nru cargo-0.44.1/vendor/winapi/src/um/wlclient.rs cargo-0.47.0/vendor/winapi/src/um/wlclient.rs --- cargo-0.44.1/vendor/winapi/src/um/wlclient.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wlclient.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,33 @@ +// Licensed under the Apache License, Version 2.0 +// or the MIT license +// , at your option. +// All files in the project carrying such notice may not be copied, modified, or distributed +// except according to those terms. +use shared::guiddef::GUID; +use shared::minwindef::{BOOL, PUCHAR, UCHAR, ULONG, USHORT}; +use shared::windot11::{DOT11_CURRENT_OPERATION_MODE, DOT11_MAC_ADDRESS}; +use um::winnt::LPWSTR; +STRUCT!{struct DOT11_ADAPTER { + gAdapterId: GUID, + pszDescription: LPWSTR, + Dot11CurrentOpMode: DOT11_CURRENT_OPERATION_MODE, +}} +pub type PDOT11_ADAPTER = *mut DOT11_ADAPTER; +STRUCT!{struct DOT11_BSS_LIST { + uNumOfBytes: ULONG, + pucBuffer: PUCHAR, +}} +pub type PDOT11_BSS_LIST = *mut DOT11_BSS_LIST; +STRUCT!{struct DOT11_PORT_STATE { + PeerMacAddress: DOT11_MAC_ADDRESS, + uSessionId: ULONG, + bPortControlled: BOOL, + bPortAuthorized: BOOL, +}} +pub type PDOT11_PORT_STATE = *mut DOT11_PORT_STATE; +STRUCT!{#[repr(packed)] struct DOT11_SECURITY_PACKET_HEADER { + PeerMac: DOT11_MAC_ADDRESS, + usEtherType: USHORT, + Data: [UCHAR; 1], +}} +pub type PDOT11_SECURITY_PACKET_HEADER = *mut DOT11_SECURITY_PACKET_HEADER; diff -Nru cargo-0.44.1/vendor/winapi/src/um/wow64apiset.rs cargo-0.47.0/vendor/winapi/src/um/wow64apiset.rs --- cargo-0.44.1/vendor/winapi/src/um/wow64apiset.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wow64apiset.rs 2020-10-01 21:38:28.000000000 +0000 @@ -3,7 +3,7 @@ // , at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. 
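For orientation, the wlanihv.rs and wlanihvtypes.rs additions above form the WLAN IHV extensibility surface: each FN!{stdcall ...} item expands to an Option<unsafe extern "system" fn ...> type alias, DOT11EXT_APIS is the callback table the operating system hands to the IHV extension DLL, and DOT11EXT_IHV_HANDLERS is the table of handlers the DLL hands back. A hypothetical sketch of one such handler, matching the DOT11EXTIHV_VALIDATE_PROFILE signature declared above (the function name, the plain-zero reason code, and the winapi feature flags are assumptions, not part of this diff):

// Hypothetical handler; only the signature and field names come from the
// declarations in this diff (DOT11EXTIHV_VALIDATE_PROFILE, DOT11EXT_IHV_PROFILE_PARAMS,
// DOT11EXT_IHV_SSID_LIST). Assumes winapi 0.3 with the wlanihv/wlanihvtypes/winnt features.
use winapi::shared::minwindef::DWORD;
use winapi::shared::winerror::ERROR_SUCCESS;
use winapi::um::winnt::HANDLE;
use winapi::um::wlanihv::{PDOT11EXT_IHV_CONNECTIVITY_PROFILE, PDOT11EXT_IHV_SECURITY_PROFILE};
use winapi::um::wlanihvtypes::PDOT11EXT_IHV_PROFILE_PARAMS;

unsafe extern "system" fn ihv_validate_profile(
    _adapter: HANDLE,
    profile_params: PDOT11EXT_IHV_PROFILE_PARAMS,
    _conn_profile: PDOT11EXT_IHV_CONNECTIVITY_PROFILE,
    _sec_profile: PDOT11EXT_IHV_SECURITY_PROFILE,
    reason_code: *mut DWORD,
) -> DWORD {
    // DOT11EXT_IHV_SSID_LIST is a counted, variable-length array; a real handler
    // would walk `SSIDs` up to `ulCount`, this sketch only reads the count.
    let ssids = (*profile_params).pSsidList;
    let _count = if ssids.is_null() { 0 } else { (*ssids).ulCount };
    *reason_code = 0; // success reason code left as plain zero in this sketch
    ERROR_SUCCESS
}

// The DOT11EXT_IHV_HANDLERS table would then carry `Some(ihv_validate_profile)`
// in its Dot11ExtIhvValidateProfile slot, since FN! wraps each pointer in Option.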
-use shared::minwindef::{BOOL, PBOOL, UINT}; +use shared::minwindef::{BOOL, PBOOL, PUSHORT, UINT}; use um::winnt::{HANDLE, LPSTR, LPWSTR, PVOID}; extern "system" { pub fn Wow64DisableWow64FsRedirection( @@ -24,4 +24,9 @@ lpBuffer: LPWSTR, uSize: UINT, ) -> UINT; + pub fn IsWow64Process2( + hProcess: HANDLE, + pProcessMachine: PUSHORT, + pNativeMachine: PUSHORT, + ) -> BOOL; } diff -Nru cargo-0.44.1/vendor/winapi/src/um/ws2bth.rs cargo-0.47.0/vendor/winapi/src/um/ws2bth.rs --- cargo-0.44.1/vendor/winapi/src/um/ws2bth.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/ws2bth.rs 2020-10-01 21:38:28.000000000 +0000 @@ -93,7 +93,7 @@ pub const MSC_FC_BIT: UCHAR = BIT!(1); pub const MSC_RTC_BIT: UCHAR = BIT!(2); pub const MSC_RTR_BIT: UCHAR = BIT!(3); -pub const MSC_RESERVED: UCHAR = (BIT!(4) | BIT!(5)); +pub const MSC_RESERVED: UCHAR = BIT!(4) | BIT!(5); pub const MSC_IC_BIT: UCHAR = BIT!(6); pub const MSC_DV_BIT: UCHAR = BIT!(7); pub const MSC_BREAK_BIT: UCHAR = BIT!(1); diff -Nru cargo-0.44.1/vendor/winapi/src/um/wtsapi32.rs cargo-0.47.0/vendor/winapi/src/um/wtsapi32.rs --- cargo-0.44.1/vendor/winapi/src/um/wtsapi32.rs 1970-01-01 00:00:00.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/wtsapi32.rs 2020-10-01 21:38:28.000000000 +0000 @@ -0,0 +1,6 @@ +use shared::minwindef::BOOL; +use shared::ntdef::{PHANDLE, ULONG}; +//1286 +extern "system" { + pub fn WTSQueryUserToken(SessionId: ULONG, phToken: PHANDLE) -> BOOL; +} diff -Nru cargo-0.44.1/vendor/winapi/src/um/xinput.rs cargo-0.47.0/vendor/winapi/src/um/xinput.rs --- cargo-0.44.1/vendor/winapi/src/um/xinput.rs 2020-05-27 21:15:58.000000000 +0000 +++ cargo-0.47.0/vendor/winapi/src/um/xinput.rs 2020-10-01 21:38:28.000000000 +0000 @@ -9,13 +9,21 @@ use um::winnt::{LPWSTR, SHORT, WCHAR}; pub const XINPUT_DEVTYPE_GAMEPAD: BYTE = 0x01; pub const XINPUT_DEVSUBTYPE_GAMEPAD: BYTE = 0x01; +pub const XINPUT_DEVSUBTYPE_UNKNOWN: BYTE = 0x00; pub const XINPUT_DEVSUBTYPE_WHEEL: BYTE = 0x02; pub const XINPUT_DEVSUBTYPE_ARCADE_STICK: BYTE = 0x03; pub const XINPUT_DEVSUBTYPE_FLIGHT_SICK: BYTE = 0x04; pub const XINPUT_DEVSUBTYPE_DANCE_PAD: BYTE = 0x05; pub const XINPUT_DEVSUBTYPE_GUITAR: BYTE = 0x06; +pub const XINPUT_DEVSUBTYPE_GUITAR_ALTERNATE: BYTE = 0x07; pub const XINPUT_DEVSUBTYPE_DRUM_KIT: BYTE = 0x08; +pub const XINPUT_DEVSUBTYPE_GUITAR_BASS: BYTE = 0x0B; +pub const XINPUT_DEVSUBTYPE_ARCADE_PAD: BYTE = 0x13; pub const XINPUT_CAPS_VOICE_SUPPORTED: WORD = 0x0004; +pub const XINPUT_CAPS_FFB_SUPPORTED: WORD = 0x0001; +pub const XINPUT_CAPS_WIRELESS: WORD = 0x0002; +pub const XINPUT_CAPS_PMD_SUPPORTED: WORD = 0x0008; +pub const XINPUT_CAPS_NO_NAVIGATION: WORD = 0x0010; pub const XINPUT_GAMEPAD_DPAD_UP: WORD = 0x0001; pub const XINPUT_GAMEPAD_DPAD_DOWN: WORD = 0x0002; pub const XINPUT_GAMEPAD_DPAD_LEFT: WORD = 0x0004;
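For orientation, the wlanapi.rs functions added above follow the usual Native Wifi lifecycle: open a client handle with WlanOpenHandle, query through it, release every API-allocated buffer with WlanFreeMemory, and close the handle with WlanCloseHandle. A minimal sketch against these bindings, assuming winapi 0.3 built with the wlanapi and winerror features; WLAN_INTERFACE_INFO_LIST and its dwNumberOfItems field are declared earlier in wlanapi.rs, outside this hunk:

// Sketch only: assumes winapi = { version = "0.3", features = ["wlanapi", "winerror"] }.
use std::ptr;
use winapi::shared::winerror::ERROR_SUCCESS;
use winapi::um::wlanapi::{
    PWLAN_INTERFACE_INFO_LIST, WlanCloseHandle, WlanEnumInterfaces, WlanFreeMemory,
    WlanOpenHandle,
};

fn main() {
    unsafe {
        let mut negotiated = 0u32;
        let mut client = ptr::null_mut();
        // Request WLAN API version 2.0 (Vista and later); the service reports what it
        // actually negotiated through the out-parameter.
        if WlanOpenHandle(2, ptr::null_mut(), &mut negotiated, &mut client) != ERROR_SUCCESS {
            eprintln!("WlanOpenHandle failed");
            return;
        }

        let mut list: PWLAN_INTERFACE_INFO_LIST = ptr::null_mut();
        if WlanEnumInterfaces(client, ptr::null_mut(), &mut list) == ERROR_SUCCESS {
            // dwNumberOfItems lives in WLAN_INTERFACE_INFO_LIST, declared earlier in wlanapi.rs.
            println!(
                "negotiated v{}, {} WLAN interface(s)",
                negotiated,
                (*list).dwNumberOfItems
            );
            // Buffers handed back by the WLAN API go back through WlanFreeMemory,
            // matching the WlanAllocateMemory/WlanFreeMemory pair declared above.
            WlanFreeMemory(list as _);
        }

        WlanCloseHandle(client, ptr::null_mut());
    }
}

The same open/query/free/close shape applies to the profile, filter-list, hosted-network, and security-settings functions in the hunk above.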